From aa6fbb6157a95c1e4becd59b3f9a76e4a1ce0e1d Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Wed, 24 Mar 2021 22:27:02 +0530 Subject: [PATCH 001/666] HADOOP-15566 initial changes for opentelemetry --- hadoop-common-project/hadoop-common/pom.xml | 25 ++++++ .../java/org/apache/hadoop/tracing/Span.java | 27 ++++++- .../apache/hadoop/tracing/SpanContext.java | 58 +++++++++++++- .../org/apache/hadoop/tracing/TraceScope.java | 14 +++- .../org/apache/hadoop/tracing/TraceUtils.java | 41 +++++++++- .../org/apache/hadoop/tracing/Tracer.java | 77 ++++++++++++++++--- hadoop-project/pom.xml | 2 +- 7 files changed, 227 insertions(+), 17 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 0a2232dae98fa..2fc4832fcb3e4 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -371,6 +371,31 @@ lz4-java provided + + io.opentelemetry + opentelemetry-api + 1.0.0 + + + io.opentelemetry + opentelemetry-sdk + 1.0.0 + + + io.opentelemetry + opentelemetry-sdk-extension-autoconfigure + 1.0.0-alpha + + + io.opentelemetry + opentelemetry-exporter-jaeger + 1.0.0 + + + io.grpc + grpc-netty-shaded + 1.26.0 + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java index 197b29fa3dfe4..d6ddf964ba693 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java @@ -17,28 +17,51 @@ */ package org.apache.hadoop.tracing; +import io.opentelemetry.context.Scope; + import java.io.Closeable; public class Span implements Closeable { - + io.opentelemetry.api.trace.Span span = null; public Span() { } + public Span(io.opentelemetry.api.trace.Span span){ + this.span = span; + } + public Span addKVAnnotation(String key, String value) { + if(span != null){ + span.setAttribute(key, value); + } return this; } public Span addTimelineAnnotation(String msg) { + if(span != null){ + span.addEvent(msg); + } return this; } public SpanContext getContext() { - return null; + return new SpanContext(span.getSpanContext()); } public void finish() { + close(); } public void close() { + if(span != null){ + span.end(); + } + } + + public Scope makeCurrent() { + if(span != null){ + return span.makeCurrent(); + } + return null; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java index 363e94dc85dba..f4f6ff8da0453 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java @@ -18,15 +18,69 @@ package org.apache.hadoop.tracing; import java.io.Closeable; +import java.util.HashMap; +import java.util.Map; + +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; +import io.opentelemetry.api.trace.TraceStateBuilder; +import org.apache.hadoop.thirdparty.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Wrapper class for SpanContext to avoid using OpenTracing/OpenTelemetry * SpanContext class directly for better separation. 
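 * The wrapped context is exchanged between processes as a flat key/value map:
 * the TRACE_ID, SPAN_ID and TRACE_FLAGS entries defined below plus any
 * TraceState entries, produced by getKVSpanContext() and rebuilt on the
 * remote side by buildFromKVMap(Map).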
*/ -public class SpanContext implements Closeable { - public SpanContext() { +public class SpanContext implements Closeable { + public static final Logger LOG = LoggerFactory.getLogger(SpanContext.class.getName()); + private static final String TRACE_ID = "TRACE_ID"; + private static final String SPAN_ID = "SPAN_ID"; + private static final String TRACE_FLAGS = "TRACE_FLAGS"; + + + private io.opentelemetry.api.trace.SpanContext spanContext = null; + public SpanContext(io.opentelemetry.api.trace.SpanContext spanContext) { + this.spanContext = spanContext; } public void close() { + + } + + public Map getKVSpanContext(){ + if(spanContext != null){ + //TODO: may we should move this to Proto + Map kvMap = new HashMap<>(); + kvMap.put(TRACE_ID, spanContext.getTraceId()); + kvMap.put(SPAN_ID, spanContext.getSpanId()); + kvMap.put(TRACE_FLAGS, spanContext.getTraceFlags().asHex()); + kvMap.putAll(spanContext.getTraceState().asMap()); + return kvMap; + } + return null; + } + + static SpanContext buildFromKVMap(Map kvMap){ + String traceId = kvMap.get(TRACE_ID); + kvMap.remove(TRACE_ID); + String spanId = kvMap.get(SPAN_ID); + kvMap.remove(SPAN_ID); + String traceFlagsHex = kvMap.get(TRACE_FLAGS); + kvMap.remove(TRACE_FLAGS); + TraceFlags traceFlags = TraceFlags.fromHex(traceFlagsHex, 0); + TraceStateBuilder traceStateBuilder = TraceState.builder(); + for(Map.Entry keyValue: kvMap.entrySet()){ + traceStateBuilder.put(keyValue.getKey(), keyValue.getValue()); + } + TraceState traceState = traceStateBuilder.build(); + io.opentelemetry.api.trace.SpanContext spanContext = io.opentelemetry.api.trace.SpanContext.createFromRemoteParent(traceId, spanId, traceFlags, traceState ); + + return new SpanContext(spanContext); + } + + public io.opentelemetry.api.trace.SpanContext getSpanContext() { + return spanContext; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java index 2abf9cb7ec2bf..ff4e104d1fee0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java @@ -17,23 +17,32 @@ */ package org.apache.hadoop.tracing; +import io.opentelemetry.context.Scope; + import java.io.Closeable; public class TraceScope implements Closeable { Span span; - + Scope scope; public TraceScope(Span span) { this.span = span; + if(span != null){ + this.scope = span.makeCurrent(); + } } // Add tag to the span public void addKVAnnotation(String key, String value) { + span.addKVAnnotation(key, value); } public void addKVAnnotation(String key, Number value) { + span.addKVAnnotation(key, value.toString()); } + public void addTimelineAnnotation(String msg) { + span.addTimelineAnnotation(msg); } public Span span() { @@ -51,6 +60,9 @@ public void detach() { } public void close() { + if(scope != null){ + scope.close(); + } if (span != null) { span.close(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java index b218493780ee1..7ba73e461a916 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java @@ -20,12 +20,19 @@ import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.thirdparty.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; +import java.util.HashMap; +import java.util.Map; /** * This class provides utility functions for tracing. */ @InterfaceAudience.Private public class TraceUtils { + public static final Logger LOG = LoggerFactory.getLogger(TraceUtils.class.getName()); static final String DEFAULT_HADOOP_TRACE_PREFIX = "hadoop.htrace."; public static TraceConfiguration wrapHadoopConf(final String prefix, @@ -38,10 +45,40 @@ public static Tracer createAndRegisterTracer(String name) { } public static SpanContext byteStringToSpanContext(ByteString byteString) { - return null; + return deserialize(byteString); } public static ByteString spanContextToByteString(SpanContext context) { - return null; + Map kvMap = context.getKVSpanContext(); + ByteString byteString = serialize(kvMap); + return byteString; + } + + //Added this for tracing will remove this after having + // a discussion + static ByteString serialize(Object obj){ + try{ + ByteArrayOutputStream out = new ByteArrayOutputStream(); + ObjectOutputStream os = new ObjectOutputStream(out); + os.writeObject(obj); + os.flush(); + byte[] byteArray = out.toByteArray(); + return ByteString.copyFrom(byteArray); + } catch (Exception e){ + LOG.error("Error in searializing the object:", e); + return null; + } + } + + static SpanContext deserialize(ByteString spanContextByteString) { + try { + ByteArrayInputStream in = new ByteArrayInputStream(spanContextByteString.toByteArray()); + ObjectInputStream is = new ObjectInputStream(in); + Map kvMap = (Map) is.readObject(); + return SpanContext.buildFromKVMap(kvMap); + } catch (Exception e) { + LOG.error("Error in deserializing the object:", e); + return null; + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index a99b004b542f4..d24c9175bf65b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -17,20 +17,39 @@ */ package org.apache.hadoop.tracing; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.context.Context; +import io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.TimeUnit; + /** * No-Op Tracer (for now) to remove HTrace without changing too many files. 
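 * As of this change the wrapper is no longer a pure no-op: spans are created
 * through the io.opentelemetry.api.trace.Tracer held by this class and, for the
 * singleton built by Builder#build(), exported to Jaeger
 * (see Builder#initialiseJaegerExporter).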
*/ public class Tracer { + public static final Logger LOG = LoggerFactory.getLogger(Tracer.class.getName()); // Singleton - private static final Tracer globalTracer = null; + private static Tracer globalTracer = null; + io.opentelemetry.api.trace.Tracer OTelTracer; private final NullTraceScope nullTraceScope; private final String name; public final static String SPAN_RECEIVER_CLASSES_KEY = "span.receiver.classes"; - public Tracer(String name) { + private Tracer(String name, io.opentelemetry.api.trace.Tracer tracer) { this.name = name; + OTelTracer = tracer; nullTraceScope = NullTraceScope.INSTANCE; } @@ -45,28 +64,32 @@ public static Tracer curThreadTracer() { * @return org.apache.hadoop.tracing.Span */ public static Span getCurrentSpan() { - return null; + io.opentelemetry.api.trace.Span span = io.opentelemetry.api.trace.Span.current(); + return span.getSpanContext().isValid()? new Span(span): null; } public TraceScope newScope(String description) { - return nullTraceScope; + Span span = new Span(OTelTracer.spanBuilder(description).startSpan()); + return new TraceScope(span); } public Span newSpan(String description, SpanContext spanCtx) { - return new Span(); + io.opentelemetry.api.trace.Span parentSpan = io.opentelemetry.api.trace.Span.wrap(spanCtx.getSpanContext()); + io.opentelemetry.api.trace.Span span = OTelTracer.spanBuilder(description).setParent(Context.current().with(parentSpan)).startSpan(); + return new Span(span); } public TraceScope newScope(String description, SpanContext spanCtx) { - return nullTraceScope; + return new TraceScope(newSpan(description, spanCtx)); } public TraceScope newScope(String description, SpanContext spanCtx, boolean finishSpanOnClose) { - return nullTraceScope; + return new TraceScope(newSpan(description, spanCtx)); } public TraceScope activateSpan(Span span) { - return nullTraceScope; + return new TraceScope(span); } public void close() { @@ -88,9 +111,45 @@ public Builder conf(TraceConfiguration conf) { return this; } + OpenTelemetry initialiseJaegerExporter(String jaegerHost, int jaegerPort, String name) { + ManagedChannel jaegerChannel = + ManagedChannelBuilder.forAddress(jaegerHost, jaegerPort).usePlaintext().build(); + // Export traces to Jaeger + JaegerGrpcSpanExporter jaegerExporter = + JaegerGrpcSpanExporter.builder() + .setChannel(jaegerChannel) + .setTimeout(30, TimeUnit.SECONDS) + .build(); + Resource serviceNameResource = + Resource.create(Attributes.of(AttributeKey.stringKey("service.name"), name)); + // Set to process the spans by the Jaeger Exporter + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(jaegerExporter)) + .setResource(Resource.getDefault().merge(serviceNameResource)) + .build(); + OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build(); + + // it's always a good idea to shut down the SDK cleanly at JVM exit. 
+ Runtime.getRuntime().addShutdownHook(new Thread(tracerProvider::close)); + + return openTelemetry; + } + + OpenTelemetry noOpTracer(){ + return OpenTelemetry.noop(); + } + public Tracer build() { if (globalTracer == null) { - globalTracer = new Tracer(name); + //jaeger tracing changes + OpenTelemetry openTelemetry = initialiseJaegerExporter("localhost", 14250, name); + io.opentelemetry.api.trace.Tracer tracer = openTelemetry.getTracer(name); + globalTracer = new Tracer(name, tracer); + + //globalTracer = new Tracer(name, noOpTracer().getTracer(name)); + Tracer.globalTracer = globalTracer; } return globalTracer; } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c26ceacc67308..08b0112e64ea1 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -86,7 +86,7 @@ 1.1 - 2.5.0 + 3.7.1 3.7.1 ${env.HADOOP_PROTOC_PATH} From d132b8b666dd84df3f0efd1f200e70d1d5adca34 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 13 Apr 2021 22:26:49 +0530 Subject: [PATCH 002/666] review comments changes --- .../src/main/java/org/apache/hadoop/tracing/Span.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java index d6ddf964ba693..acc1c47fc9e73 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java @@ -22,7 +22,7 @@ import java.io.Closeable; public class Span implements Closeable { - io.opentelemetry.api.trace.Span span = null; + private io.opentelemetry.api.trace.Span span = null; public Span() { } @@ -45,7 +45,10 @@ public Span addTimelineAnnotation(String msg) { } public SpanContext getContext() { - return new SpanContext(span.getSpanContext()); + if(span != null){ + return new SpanContext(span.getSpanContext()); + } + return null; } public void finish() { From 84caa89c7ac745c05ad0e7505073ee6e015fa0ec Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 20 Apr 2021 15:11:01 +0530 Subject: [PATCH 003/666] changes to use autoinitialize and maven depdendecy changes --- hadoop-common-project/hadoop-common/pom.xml | 16 +----- .../org/apache/hadoop/tracing/Tracer.java | 55 ++++--------------- hadoop-project/pom.xml | 11 ++++ 3 files changed, 25 insertions(+), 57 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 2fc4832fcb3e4..faf1511e57f23 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -374,27 +374,17 @@ io.opentelemetry opentelemetry-api - 1.0.0 + ${opentelemetry.version} io.opentelemetry opentelemetry-sdk - 1.0.0 + ${opentelemetry.version} io.opentelemetry opentelemetry-sdk-extension-autoconfigure - 1.0.0-alpha - - - io.opentelemetry - opentelemetry-exporter-jaeger - 1.0.0 - - - io.grpc - grpc-netty-shaded - 1.26.0 + ${opentelemetry.version}-alpha diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index d24c9175bf65b..534ecd7ed293d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -17,22 +17,12 @@ */ package 
org.apache.hadoop.tracing; -import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.api.common.AttributeKey; -import io.opentelemetry.api.common.Attributes; import io.opentelemetry.context.Context; -import io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter; -import io.opentelemetry.sdk.OpenTelemetrySdk; -import io.opentelemetry.sdk.resources.Resource; -import io.opentelemetry.sdk.trace.SdkTracerProvider; -import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.TimeUnit; - /** * No-Op Tracer (for now) to remove HTrace without changing too many files. */ @@ -99,6 +89,8 @@ public String getName() { return name; } + + public static class Builder { static Tracer globalTracer; private String name; @@ -111,44 +103,19 @@ public Builder conf(TraceConfiguration conf) { return this; } - OpenTelemetry initialiseJaegerExporter(String jaegerHost, int jaegerPort, String name) { - ManagedChannel jaegerChannel = - ManagedChannelBuilder.forAddress(jaegerHost, jaegerPort).usePlaintext().build(); - // Export traces to Jaeger - JaegerGrpcSpanExporter jaegerExporter = - JaegerGrpcSpanExporter.builder() - .setChannel(jaegerChannel) - .setTimeout(30, TimeUnit.SECONDS) - .build(); - Resource serviceNameResource = - Resource.create(Attributes.of(AttributeKey.stringKey("service.name"), name)); - // Set to process the spans by the Jaeger Exporter - SdkTracerProvider tracerProvider = - SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(jaegerExporter)) - .setResource(Resource.getDefault().merge(serviceNameResource)) - .build(); - OpenTelemetrySdk openTelemetry = - OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build(); - - // it's always a good idea to shut down the SDK cleanly at JVM exit. 
- Runtime.getRuntime().addShutdownHook(new Thread(tracerProvider::close)); - + static OpenTelemetry initialiseTracer(String name) { + //TODO: Explore the possibility of moving these properties to config or environment variables + System.setProperty("otel.metrics.exporter", "none"); + System.setProperty("otel.traces.exporter", "jaeger"); + System.setProperty("otel.exporter.jaeger.endpoint", "http://localhost:14250"); + System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); + OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); return openTelemetry; } - OpenTelemetry noOpTracer(){ - return OpenTelemetry.noop(); - } - public Tracer build() { if (globalTracer == null) { - //jaeger tracing changes - OpenTelemetry openTelemetry = initialiseJaegerExporter("localhost", 14250, name); - io.opentelemetry.api.trace.Tracer tracer = openTelemetry.getTracer(name); - globalTracer = new Tracer(name, tracer); - - //globalTracer = new Tracer(name, noOpTracer().getTracer(name)); + globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); Tracer.globalTracer = globalTracer; } return globalTracer; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 08b0112e64ea1..ba2e64ea2c30f 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -210,6 +210,7 @@ 7.7.0 1.0.7.Final 1.0.2 + 1.0.0 @@ -1754,6 +1755,16 @@ lz4-java ${lz4-java.version} + + io.opentelemetry + opentelemetry-exporter-jaeger + 1.0.0 + + + io.grpc + grpc-netty-shaded + 1.26.0 + From 48503df9bf705a1d95d8992f66f5014a2516eb0d Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Wed, 21 Apr 2021 12:35:17 +0530 Subject: [PATCH 004/666] jaeger maven dependency changes for test --- hadoop-common-project/hadoop-common/pom.xml | 12 ++++++++++++ .../main/java/org/apache/hadoop/tracing/Tracer.java | 2 +- hadoop-project/pom.xml | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index faf1511e57f23..1acb39f056c2d 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -386,6 +386,18 @@ opentelemetry-sdk-extension-autoconfigure ${opentelemetry.version}-alpha + + io.opentelemetry + opentelemetry-exporter-jaeger + ${opentelemetry.version} + test + + + io.grpc + grpc-netty-shaded + 1.26.0 + test + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index 534ecd7ed293d..5fe885f0ec798 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -113,7 +113,7 @@ static OpenTelemetry initialiseTracer(String name) { return openTelemetry; } - public Tracer build() { + public synchronized Tracer build() { if (globalTracer == null) { globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); Tracer.globalTracer = globalTracer; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 38b0e9bc123f3..e950ed9c6f208 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1760,7 +1760,7 @@ io.opentelemetry opentelemetry-exporter-jaeger - 1.0.0 + ${opentelemetry.version} io.grpc From 0f668495973a635f0cb668af3d92c018e3a22a36 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Sat, 24 Apr 2021 16:58:45 +0530 
Subject: [PATCH 005/666] changes for noop tracer --- hadoop-common-project/hadoop-common/pom.xml | 12 ------------ .../main/java/org/apache/hadoop/tracing/Tracer.java | 3 +-- hadoop-project/pom.xml | 10 ---------- 3 files changed, 1 insertion(+), 24 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1acb39f056c2d..faf1511e57f23 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -386,18 +386,6 @@ opentelemetry-sdk-extension-autoconfigure ${opentelemetry.version}-alpha - - io.opentelemetry - opentelemetry-exporter-jaeger - ${opentelemetry.version} - test - - - io.grpc - grpc-netty-shaded - 1.26.0 - test - diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index 5fe885f0ec798..b53f5c692d592 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -106,8 +106,7 @@ public Builder conf(TraceConfiguration conf) { static OpenTelemetry initialiseTracer(String name) { //TODO: Explore the possibility of moving these properties to config or environment variables System.setProperty("otel.metrics.exporter", "none"); - System.setProperty("otel.traces.exporter", "jaeger"); - System.setProperty("otel.exporter.jaeger.endpoint", "http://localhost:14250"); + System.setProperty("otel.traces.exporter", "none"); System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); return openTelemetry; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e950ed9c6f208..b3c3b1f2e466c 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1757,16 +1757,6 @@ lz4-java ${lz4-java.version} - - io.opentelemetry - opentelemetry-exporter-jaeger - ${opentelemetry.version} - - - io.grpc - grpc-netty-shaded - 1.26.0 - From cdc9075519c8dd948e15032460ddb92ec063448e Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 1 Jun 2021 22:24:45 +0530 Subject: [PATCH 006/666] changes to use environment variables for tracing exporter --- .../hadoop-common/src/main/conf/hadoop-env.sh | 6 ++++ .../org/apache/hadoop/tracing/Tracer.java | 12 ++++--- .../org/apache/hadoop/tracing/TestTracer.java | 33 +++++++++++++++++++ 3 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index f4625f5999b1c..db05942e5dc70 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -428,3 +428,9 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} # By default, Hadoop uses jsvc which needs to know to launch a # server jvm. 
# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server" + +### +# Opentelemetry Trace Exporters Configuration +### +export OTEL_TRACES_EXPORTER=none +export OTEL_METRICS_EXPORTER=none \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index b53f5c692d592..dadebbab2ae00 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -22,7 +22,6 @@ import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * No-Op Tracer (for now) to remove HTrace without changing too many files. */ @@ -104,14 +103,19 @@ public Builder conf(TraceConfiguration conf) { } static OpenTelemetry initialiseTracer(String name) { - //TODO: Explore the possibility of moving these properties to config or environment variables - System.setProperty("otel.metrics.exporter", "none"); - System.setProperty("otel.traces.exporter", "none"); + //added to avoid test failures + setOTelEnvVariables("none", "none"); System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); return openTelemetry; } + //this method is added to set the environment variables for testing + private static void setOTelEnvVariables(String metricsExporter, String tracesExporter) { + System.setProperty("otel.metrics.exporter", metricsExporter); + System.setProperty("otel.traces.exporter", tracesExporter); + } + public synchronized Tracer build() { if (globalTracer == null) { globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java new file mode 100644 index 0000000000000..275f09637c340 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.tracing; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestTracer { + @Test + public void testNoopTracer(){ + Tracer tracer = new Tracer.Builder("testTracer").build(); + assertNotNull(tracer.OTelTracer); + } + +} + + From 5c02f36f1db27313fa16c131723f8971dd264e72 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 15 Jun 2021 13:21:53 +0530 Subject: [PATCH 007/666] set otel system properties only when env variables are not --- .../main/java/org/apache/hadoop/tracing/Tracer.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index dadebbab2ae00..a3d28503d005c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -112,8 +112,16 @@ static OpenTelemetry initialiseTracer(String name) { //this method is added to set the environment variables for testing private static void setOTelEnvVariables(String metricsExporter, String tracesExporter) { - System.setProperty("otel.metrics.exporter", metricsExporter); - System.setProperty("otel.traces.exporter", tracesExporter); + if(System.getenv().get("OTEL_TRACES_EXPORTER") == null){ + System.setProperty("otel.metrics.exporter", metricsExporter); + } else { + LOG.info("Tracing Span Exporter set to :" + System.getenv().get("OTEL_TRACES_EXPORTER")); + } + if(System.getenv().get("OTEL_METRICS_EXPORTER") == null){ + System.setProperty("otel.traces.exporter", tracesExporter); + } else { + LOG.info("Tracing Span Exporter set to :" + System.getenv().get("OTEL_METRICS_EXPORTER")); + } } public synchronized Tracer build() { From 310e46e4d63f550af3bb47d5c79b367069f0616a Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 15 Jun 2021 13:34:31 +0530 Subject: [PATCH 008/666] changes to upgrade to latest and test jaeger --- hadoop-project/pom.xml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 1f0be3b2976a3..c53a0f56e4ea7 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -212,7 +212,7 @@ 5.3.0 2.4.2 9.8.1 - 1.0.0 + 1.3.0 @@ -1757,6 +1757,16 @@ lz4-java ${lz4-java.version} + + io.opentelemetry + opentelemetry-exporter-jaeger + ${opentelemetry.version} + + + io.grpc + grpc-netty-shaded + 1.26.0 + From aab9744d8a9135e067f6cf6aec148c66e3f3b43e Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Thu, 12 Aug 2021 00:30:21 +0530 Subject: [PATCH 009/666] opentelemetry using javaagent --- hadoop-common-project/hadoop-common/pom.xml | 12 ++--- .../hadoop-common/src/main/conf/hadoop-env.sh | 7 ++- .../org/apache/hadoop/tracing/TraceUtils.java | 1 - .../org/apache/hadoop/tracing/Tracer.java | 47 ++----------------- hadoop-project/pom.xml | 13 ++--- 5 files changed, 19 insertions(+), 61 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index faf1511e57f23..ea4c9520c9e17 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -377,14 +377,10 @@ ${opentelemetry.version} - io.opentelemetry - opentelemetry-sdk - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-sdk-extension-autoconfigure - ${opentelemetry.version}-alpha + 
io.opentelemetry.javaagent + opentelemetry-javaagent + ${opentelemetry-instrumentation.version} + all diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index db05942e5dc70..42d130e8dfb10 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -432,5 +432,8 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} ### # Opentelemetry Trace Exporters Configuration ### -export OTEL_TRACES_EXPORTER=none -export OTEL_METRICS_EXPORTER=none \ No newline at end of file +export HADOOP_TRACE_OPTS="-javaagent: -Dotel.traces.exporter=jaeger -Dotel.metrics.exporter=none" +export HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_NAMENODE" +echo "hdfs namenode options: ".$HDFS_HDFS_NAMENODE_OPTS +export HDFS_DATANODE_OPTS="$HDFS_DATANODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_DATANODE" +export HDFS_SECONDARYNAMENODE_OPTS="$HDFS_SECONDARYNAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_SECONDARYNAMENODE" \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java index 7ba73e461a916..ccb65c4e08094 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java @@ -24,7 +24,6 @@ import org.slf4j.LoggerFactory; import java.io.*; -import java.util.HashMap; import java.util.Map; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index a3d28503d005c..f3adaa831f78c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.tracing; -import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.context.Context; -import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** @@ -27,20 +26,15 @@ */ public class Tracer { public static final Logger LOG = LoggerFactory.getLogger(Tracer.class.getName()); + private static final String INSTRUMENTATION_NAME = "io.opentelemetry.contrib.hadoop"; // Singleton private static Tracer globalTracer = null; - io.opentelemetry.api.trace.Tracer OTelTracer; - private final NullTraceScope nullTraceScope; - private final String name; + io.opentelemetry.api.trace.Tracer OTelTracer = GlobalOpenTelemetry.getTracer(INSTRUMENTATION_NAME); public final static String SPAN_RECEIVER_CLASSES_KEY = "span.receiver.classes"; - private Tracer(String name, io.opentelemetry.api.trace.Tracer tracer) { - this.name = name; - OTelTracer = tracer; - nullTraceScope = NullTraceScope.INSTANCE; - } + private Tracer() {} // Keeping this function at the moment for HTrace compatiblity, // in fact all threads share a single global tracer for OpenTracing. 
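Caller-side usage of the wrapper is unchanged by the move to the javaagent; a minimal sketch using only the types kept in this file (the service and span names below are illustrative, and the exporter is supplied by the attached agent rather than by this class):

    Tracer tracer = new Tracer.Builder("DataNode").build();   // shared singleton backed by GlobalOpenTelemetry
    try (TraceScope scope = tracer.newScope("readBlock")) {    // starts an OTel span and makes it current
      scope.addKVAnnotation("blockId", "blk_1001");            // recorded as a span attribute
    }                                                          // close() ends both the scope and the span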
@@ -84,14 +78,9 @@ public TraceScope activateSpan(Span span) { public void close() { } - public String getName() { - return name; - } - - public static class Builder { - static Tracer globalTracer; + static Tracer globalTracer = new Tracer(); private String name; public Builder(final String name) { @@ -102,33 +91,7 @@ public Builder conf(TraceConfiguration conf) { return this; } - static OpenTelemetry initialiseTracer(String name) { - //added to avoid test failures - setOTelEnvVariables("none", "none"); - System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); - OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); - return openTelemetry; - } - - //this method is added to set the environment variables for testing - private static void setOTelEnvVariables(String metricsExporter, String tracesExporter) { - if(System.getenv().get("OTEL_TRACES_EXPORTER") == null){ - System.setProperty("otel.metrics.exporter", metricsExporter); - } else { - LOG.info("Tracing Span Exporter set to :" + System.getenv().get("OTEL_TRACES_EXPORTER")); - } - if(System.getenv().get("OTEL_METRICS_EXPORTER") == null){ - System.setProperty("otel.traces.exporter", tracesExporter); - } else { - LOG.info("Tracing Span Exporter set to :" + System.getenv().get("OTEL_METRICS_EXPORTER")); - } - } - public synchronized Tracer build() { - if (globalTracer == null) { - globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); - Tracer.globalTracer = globalTracer; - } return globalTracer; } } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c53a0f56e4ea7..4989b8a6daf21 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -213,6 +213,7 @@ 2.4.2 9.8.1 1.3.0 + 1.3.0 @@ -1758,14 +1759,10 @@ ${lz4-java.version} - io.opentelemetry - opentelemetry-exporter-jaeger - ${opentelemetry.version} - - - io.grpc - grpc-netty-shaded - 1.26.0 + io.opentelemetry.javaagent + opentelemetry-javaagent + ${opentelemetry-instrumentation.version} + all From 1dbe981ae7ba5d1aedb4db0031a33dcf65d58191 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Mon, 16 Aug 2021 10:38:20 +0530 Subject: [PATCH 010/666] added opentelemetry trace options for hdfs --- hadoop-common-project/hadoop-common/pom.xml | 6 ------ .../hadoop-common/src/main/conf/hadoop-env.sh | 11 ++++++----- hadoop-project/pom.xml | 14 ++++++-------- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index ea4c9520c9e17..e8a154b130353 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -376,12 +376,6 @@ opentelemetry-api ${opentelemetry.version} - - io.opentelemetry.javaagent - opentelemetry-javaagent - ${opentelemetry-instrumentation.version} - all - diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index 42d130e8dfb10..fd1f46be12acc 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -431,9 +431,10 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} ### # Opentelemetry Trace Exporters Configuration +# TODO: javaagent is asummed to be stored in common/lib this should be moved to seperate directory ### -export HADOOP_TRACE_OPTS="-javaagent: -Dotel.traces.exporter=jaeger -Dotel.metrics.exporter=none" -export 
HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_NAMENODE" -echo "hdfs namenode options: ".$HDFS_HDFS_NAMENODE_OPTS -export HDFS_DATANODE_OPTS="$HDFS_DATANODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_DATANODE" -export HDFS_SECONDARYNAMENODE_OPTS="$HDFS_SECONDARYNAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_SECONDARYNAMENODE" \ No newline at end of file +# export OPENTELEMETRY_JAVAAGENT_PATH="$(find $HADOOP_HOME/share/hadoop/common/lib/ -name opentelemetry-javaagent*)" +# export HADOOP_TRACE_OPTS="-javaagent:$OPENTELEMETRY_JAVAAGENT_PATH -Dotel.traces.exporter=jaeger -Dotel.metrics.exporter=none" +# export HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_NAMENODE" +# export HDFS_DATANODE_OPTS="$HDFS_DATANODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_DATANODE" +# export HDFS_SECONDARYNAMENODE_OPTS="$HDFS_SECONDARYNAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_SECONDARYNAMENODE" \ No newline at end of file diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 4989b8a6daf21..d283ec55010bb 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -919,8 +919,12 @@ - - + + io.opentelemetry.javaagent + opentelemetry-javaagent + ${opentelemetry-instrumentation.version} + all + io.netty netty @@ -1758,12 +1762,6 @@ lz4-java ${lz4-java.version} - - io.opentelemetry.javaagent - opentelemetry-javaagent - ${opentelemetry-instrumentation.version} - all - From 9a6414c497a338c105644b7b8edabd0d2cd30ed0 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Mon, 20 Sep 2021 08:48:31 +0530 Subject: [PATCH 011/666] changes to handle NPE and modified serialization and deserialization of span context --- .../hadoop-common/src/main/conf/hadoop-env.sh | 7 ++-- .../java/org/apache/hadoop/ipc/Server.java | 6 ++-- .../apache/hadoop/tracing/SpanContext.java | 32 +++++++++++-------- .../org/apache/hadoop/tracing/TraceUtils.java | 13 +++++--- .../org/apache/hadoop/tracing/Tracer.java | 3 ++ .../org/apache/hadoop/util/ProtoUtil.java | 4 +-- .../src/main/proto/RpcHeader.proto | 2 +- .../datatransfer/DataTransferProtoUtil.java | 4 +-- .../hdfs/protocol/datatransfer/Sender.java | 8 ++--- .../src/main/proto/datatransfer.proto | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 ++++ .../hdfs/protocol/datatransfer/Receiver.java | 15 +++++---- hadoop-project/pom.xml | 6 ---- 13 files changed, 63 insertions(+), 45 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index fd1f46be12acc..e7ad160c32041 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -435,6 +435,7 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} ### # export OPENTELEMETRY_JAVAAGENT_PATH="$(find $HADOOP_HOME/share/hadoop/common/lib/ -name opentelemetry-javaagent*)" # export HADOOP_TRACE_OPTS="-javaagent:$OPENTELEMETRY_JAVAAGENT_PATH -Dotel.traces.exporter=jaeger -Dotel.metrics.exporter=none" -# export HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_NAMENODE" -# export HDFS_DATANODE_OPTS="$HDFS_DATANODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_DATANODE" -# export HDFS_SECONDARYNAMENODE_OPTS="$HDFS_SECONDARYNAMENODE_OPTS 
$HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=HDFS_SECONDARYNAMENODE" \ No newline at end of file +# export HDFS_NAMENODE_OPTS="$HDFS_NAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=hdfs-namenode" +# export HDFS_DATANODE_OPTS="$HDFS_DATANODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=hdfs-datanode" +# export HDFS_SECONDARYNAMENODE_OPTS="$HDFS_SECONDARYNAMENODE_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=hdfs-secondarynamenode" +# export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS $HADOOP_TRACE_OPTS -Dotel.resource.attributes=service.name=hadoop-client" \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 922c6e937fb9b..36d9c171cdcb6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -2783,15 +2783,15 @@ private void processRpcRequest(RpcRequestHeaderProto header, Span span = null; if (header.hasTraceInfo()) { RPCTraceInfoProto traceInfoProto = header.getTraceInfo(); - if (traceInfoProto.hasSpanContext()) { + if (traceInfoProto.getSpanContextMap() != null) { if (tracer == null) { setTracer(Tracer.curThreadTracer()); } if (tracer != null) { // If the incoming RPC included tracing info, always continue the // trace - SpanContext spanCtx = TraceUtils.byteStringToSpanContext( - traceInfoProto.getSpanContext()); + SpanContext spanCtx = TraceUtils.mapToSpanContext( + traceInfoProto.getSpanContextMap()); if (spanCtx != null) { span = tracer.newSpan( RpcClientUtil.toTraceName(rpcRequest.toString()), spanCtx); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java index f4f6ff8da0453..0cb7f65373412 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java @@ -63,21 +63,27 @@ public Map getKVSpanContext(){ } static SpanContext buildFromKVMap(Map kvMap){ - String traceId = kvMap.get(TRACE_ID); - kvMap.remove(TRACE_ID); - String spanId = kvMap.get(SPAN_ID); - kvMap.remove(SPAN_ID); - String traceFlagsHex = kvMap.get(TRACE_FLAGS); - kvMap.remove(TRACE_FLAGS); - TraceFlags traceFlags = TraceFlags.fromHex(traceFlagsHex, 0); - TraceStateBuilder traceStateBuilder = TraceState.builder(); - for(Map.Entry keyValue: kvMap.entrySet()){ - traceStateBuilder.put(keyValue.getKey(), keyValue.getValue()); + try{ + String traceId = kvMap.get(TRACE_ID); + String spanId = kvMap.get(SPAN_ID); + String traceFlagsHex = kvMap.get(TRACE_FLAGS); + TraceFlags traceFlags = TraceFlags.fromHex(traceFlagsHex, 0); + TraceStateBuilder traceStateBuilder = TraceState.builder(); + for(Map.Entry keyValue: kvMap.entrySet()){ + if(keyValue.getKey().equals(TRACE_ID) || keyValue.getKey().equals(SPAN_ID) || keyValue.getKey().equals(TRACE_FLAGS)){ + continue; + } + traceStateBuilder.put(keyValue.getKey(), keyValue.getValue()); + } + TraceState traceState = traceStateBuilder.build(); + io.opentelemetry.api.trace.SpanContext spanContext = io.opentelemetry.api.trace.SpanContext.createFromRemoteParent(traceId, spanId, traceFlags, traceState ); + return new SpanContext(spanContext); + } 
catch (Exception e){ + LOG.error("Error in processing remote context " + kvMap != null? kvMap.toString() : "", e); + return null; } - TraceState traceState = traceStateBuilder.build(); - io.opentelemetry.api.trace.SpanContext spanContext = io.opentelemetry.api.trace.SpanContext.createFromRemoteParent(traceId, spanId, traceFlags, traceState ); - return new SpanContext(spanContext); + } public io.opentelemetry.api.trace.SpanContext getSpanContext() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java index ccb65c4e08094..8a7f82df0630a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java @@ -47,10 +47,15 @@ public static SpanContext byteStringToSpanContext(ByteString byteString) { return deserialize(byteString); } - public static ByteString spanContextToByteString(SpanContext context) { - Map kvMap = context.getKVSpanContext(); - ByteString byteString = serialize(kvMap); - return byteString; + public static SpanContext mapToSpanContext(Map kvMap) { + return SpanContext.buildFromKVMap(kvMap); + } + + public static Map spanContextToMap(SpanContext context) { + if(context == null){ + return null; + } + return context.getKVSpanContext(); } //Added this for tracing will remove this after having diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index f3adaa831f78c..21a0689f9be40 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -63,6 +63,9 @@ public Span newSpan(String description, SpanContext spanCtx) { } public TraceScope newScope(String description, SpanContext spanCtx) { + if(spanCtx == null){ + return new TraceScope(new Span(io.opentelemetry.api.trace.Span.getInvalid())); + } return new TraceScope(newSpan(description, spanCtx)); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java index 9807adc50d6d1..ebbdecb0af6a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java @@ -182,8 +182,8 @@ public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind, Span span = Tracer.getCurrentSpan(); if (span != null) { RPCTraceInfoProto.Builder traceInfoProtoBuilder = - RPCTraceInfoProto.newBuilder().setSpanContext( - TraceUtils.spanContextToByteString(span.getContext())); + RPCTraceInfoProto.newBuilder().putAllSpanContext( + TraceUtils.spanContextToMap(span.getContext())); result.setTraceInfo(traceInfoProtoBuilder); } diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto index 760e8261b4ea7..13af0c85734d5 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto @@ -63,7 +63,7 @@ enum RpcKindProto { message RPCTraceInfoProto { optional int64 traceId = 1; 
// parentIdHigh optional int64 parentId = 2; // parentIdLow - optional bytes spanContext = 3; // Trace SpanContext + map spanContext = 3; // Trace SpanContext } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java index 85ee3ce4f4987..88541724ecf8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java @@ -91,8 +91,8 @@ static BaseHeaderProto buildBaseHeader(ExtendedBlock blk, Span span = Tracer.getCurrentSpan(); if (span != null) { DataTransferTraceInfoProto.Builder traceInfoProtoBuilder = - DataTransferTraceInfoProto.newBuilder().setSpanContext( - TraceUtils.spanContextToByteString(span.getContext())); + DataTransferTraceInfoProto.newBuilder().putAllSpanContext( + TraceUtils.spanContextToMap(span.getContext())); builder.setTraceInfo(traceInfoProtoBuilder); } return builder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index 3d81a62993efc..cf29410aea0cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -216,8 +216,8 @@ public void releaseShortCircuitFds(SlotId slotId) throws IOException { Span span = Tracer.getCurrentSpan(); if (span != null) { DataTransferTraceInfoProto.Builder traceInfoProtoBuilder = - DataTransferTraceInfoProto.newBuilder().setSpanContext( - TraceUtils.spanContextToByteString(span.getContext())); + DataTransferTraceInfoProto.newBuilder().putAllSpanContext( + TraceUtils.spanContextToMap(span.getContext())); builder.setTraceInfo(traceInfoProtoBuilder); } ReleaseShortCircuitAccessRequestProto proto = builder.build(); @@ -232,8 +232,8 @@ public void requestShortCircuitShm(String clientName) throws IOException { Span span = Tracer.getCurrentSpan(); if (span != null) { DataTransferTraceInfoProto.Builder traceInfoProtoBuilder = - DataTransferTraceInfoProto.newBuilder().setSpanContext( - TraceUtils.spanContextToByteString(span.getContext())); + DataTransferTraceInfoProto.newBuilder().putAllSpanContext( + TraceUtils.spanContextToMap(span.getContext())); builder.setTraceInfo(traceInfoProtoBuilder); } ShortCircuitShmRequestProto proto = builder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto index 28a292e729e2a..9741bccf19ad9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto @@ -60,7 +60,7 @@ message BaseHeaderProto { message DataTransferTraceInfoProto { optional uint64 traceId = 1; optional uint64 parentId = 2; - optional bytes spanContext = 3; + map spanContext = 3; } message ClientOperationHeaderProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 10d66d055ba35..626024d99a041 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -225,6 +225,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> lz4-java test + + io.opentelemetry.javaagent + opentelemetry-javaagent + ${opentelemetry-instrumentation.version} + all + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index ab767e3b93810..951c812f89263 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -22,6 +22,7 @@ import java.io.DataInputStream; import java.io.IOException; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -79,13 +80,15 @@ protected final Op readOp() throws IOException { return Op.read(in); } - private TraceScope continueTraceSpan(ByteString spanContextBytes, + private TraceScope continueTraceSpan(Map spanContextMap, String description) { TraceScope scope = null; - SpanContext spanContext = - TraceUtils.byteStringToSpanContext(spanContextBytes); - if (spanContext != null) { - scope = tracer.newScope(description, spanContext); + if(!spanContextMap.isEmpty()){ + SpanContext spanContext = + TraceUtils.mapToSpanContext(spanContextMap); + if (spanContext != null) { + scope = tracer.newScope(description, spanContext); + } } return scope; } @@ -97,7 +100,7 @@ private TraceScope continueTraceSpan(ClientOperationHeaderProto header, private TraceScope continueTraceSpan(BaseHeaderProto header, String description) { - return continueTraceSpan(header.getTraceInfo().getSpanContext(), + return continueTraceSpan(header.getTraceInfo().getSpanContextMap(), description); } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 76f39bf8590eb..1936752707a77 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -919,12 +919,6 @@ - - io.opentelemetry.javaagent - opentelemetry-javaagent - ${opentelemetry-instrumentation.version} - all - io.netty netty From 93e60d629e1932e18e59f733ea211e4eadefaa7d Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Mon, 20 Sep 2021 11:17:11 +0530 Subject: [PATCH 012/666] cleanup --- .../org/apache/hadoop/tracing/TraceUtils.java | 37 +------------------ hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 --- hadoop-project/pom.xml | 6 +++ 3 files changed, 8 insertions(+), 41 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java index 8a7f82df0630a..55c1a247f941a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java @@ -34,8 +34,7 @@ public class TraceUtils { public static final Logger LOG = LoggerFactory.getLogger(TraceUtils.class.getName()); static final String DEFAULT_HADOOP_TRACE_PREFIX = "hadoop.htrace."; - public static TraceConfiguration wrapHadoopConf(final String prefix, - final Configuration conf) { + public static TraceConfiguration wrapHadoopConf(final String prefix, final Configuration conf) { return null; } @@ -43,46 +42,14 @@ public static Tracer 
createAndRegisterTracer(String name) { return null; } - public static SpanContext byteStringToSpanContext(ByteString byteString) { - return deserialize(byteString); - } - public static SpanContext mapToSpanContext(Map kvMap) { return SpanContext.buildFromKVMap(kvMap); } public static Map spanContextToMap(SpanContext context) { - if(context == null){ + if (context == null) { return null; } return context.getKVSpanContext(); } - - //Added this for tracing will remove this after having - // a discussion - static ByteString serialize(Object obj){ - try{ - ByteArrayOutputStream out = new ByteArrayOutputStream(); - ObjectOutputStream os = new ObjectOutputStream(out); - os.writeObject(obj); - os.flush(); - byte[] byteArray = out.toByteArray(); - return ByteString.copyFrom(byteArray); - } catch (Exception e){ - LOG.error("Error in searializing the object:", e); - return null; - } - } - - static SpanContext deserialize(ByteString spanContextByteString) { - try { - ByteArrayInputStream in = new ByteArrayInputStream(spanContextByteString.toByteArray()); - ObjectInputStream is = new ObjectInputStream(in); - Map kvMap = (Map) is.readObject(); - return SpanContext.buildFromKVMap(kvMap); - } catch (Exception e) { - LOG.error("Error in deserializing the object:", e); - return null; - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 626024d99a041..10d66d055ba35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -225,12 +225,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> lz4-java test - - io.opentelemetry.javaagent - opentelemetry-javaagent - ${opentelemetry-instrumentation.version} - all - diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 1936752707a77..76f39bf8590eb 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -919,6 +919,12 @@ + + io.opentelemetry.javaagent + opentelemetry-javaagent + ${opentelemetry-instrumentation.version} + all + io.netty netty From e84c7201f502bcc946c9f7de70f5a062da5a63fd Mon Sep 17 00:00:00 2001 From: Hui Fei Date: Fri, 19 Feb 2021 09:07:22 +0800 Subject: [PATCH 013/666] HDFS-15830. 
Support to make dfs.image.parallel.load reconfigurable (#2694) --- .../hadoop/hdfs/server/namenode/FSImage.java | 1 + .../hdfs/server/namenode/FSImageFormat.java | 1 + .../namenode/FSImageFormatProtobuf.java | 24 +++++++++++++------ .../hadoop/hdfs/server/namenode/NameNode.java | 18 +++++++++++++- .../namenode/TestNameNodeReconfigure.java | 16 +++++++++++++ .../hadoop/hdfs/tools/TestDFSAdmin.java | 6 +++-- 6 files changed, 56 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index ccc693492973a..86b4150777edc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -172,6 +172,7 @@ protected FSImage(Configuration conf, this.editLog = FSEditLog.newInstance(conf, storage, editsDirs); archivalManager = new NNStorageRetentionManager(conf, storage, editLog); + FSImageFormatProtobuf.initParallelLoad(conf); } void format(FSNamesystem fsn, String clusterId, boolean force) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index dcc60bf5e7152..478cec55d0dd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -242,6 +242,7 @@ public void load(File file, boolean requireSameLayoutVersion) * the layout version. 
*/ public static LoaderDelegator newLoader(Configuration conf, FSNamesystem fsn) { + return new LoaderDelegator(conf, fsn); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 5c40efc797ca5..7556727e6293a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -88,6 +88,8 @@ public final class FSImageFormatProtobuf { private static final Logger LOG = LoggerFactory .getLogger(FSImageFormatProtobuf.class); + private static volatile boolean enableParallelLoad = false; + public static final class LoaderContext { private SerialNumberManager.StringTable stringTable; private final ArrayList refList = Lists.newArrayList(); @@ -582,9 +584,7 @@ private void loadErasureCodingSection(InputStream in) } private static boolean enableParallelSaveAndLoad(Configuration conf) { - boolean loadInParallel = - conf.getBoolean(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, - DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT); + boolean loadInParallel = enableParallelLoad; boolean compressionEnabled = conf.getBoolean( DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT); @@ -600,6 +600,20 @@ private static boolean enableParallelSaveAndLoad(Configuration conf) { return loadInParallel; } + public static void initParallelLoad(Configuration conf) { + enableParallelLoad = + conf.getBoolean(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, + DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT); + } + + public static void refreshParallelSaveAndLoad(boolean enable) { + enableParallelLoad = enable; + } + + public static boolean getEnableParallelLoad() { + return enableParallelLoad; + } + public static final class Saver { public static final int CHECK_CANCEL_INTERVAL = 4096; private boolean writeSubSections = false; @@ -640,10 +654,6 @@ public int getInodesPerSubSection() { return inodesPerSubSection; } - public boolean shouldWriteSubSections() { - return writeSubSections; - } - /** * Commit the length and offset of a fsimage section to the summary index, * including the sub section, which will be committed before the section is diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 3225bab32a093..54671ea6cf4ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -123,6 +123,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY; import 
static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT; @@ -325,7 +327,8 @@ public enum OperationCategory { DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, - DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)); + DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, + DFS_IMAGE_PARALLEL_LOAD_KEY)); private static final String USAGE = "Usage: hdfs namenode [" + StartupOption.BACKUP.getName() + "] | \n\t[" @@ -2184,6 +2187,8 @@ protected String reconfigurePropertyImpl(String property, String newVal) .equals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)) { reconfBlockPlacementPolicy(); return newVal; + } else if (property.equals(DFS_IMAGE_PARALLEL_LOAD_KEY)) { + return reconfigureParallelLoad(newVal); } else { throw new ReconfigurationException(property, newVal, getConf().get( property)); @@ -2359,6 +2364,17 @@ String reconfigureSPSModeEvent(String newVal, String property) return newVal; } + String reconfigureParallelLoad(String newVal) { + boolean enableParallelLoad; + if (newVal == null) { + enableParallelLoad = DFS_IMAGE_PARALLEL_LOAD_DEFAULT; + } else { + enableParallelLoad = Boolean.parseBoolean(newVal); + } + FSImageFormatProtobuf.refreshParallelSaveAndLoad(enableParallelLoad); + return Boolean.toString(enableParallelLoad); + } + @Override // ReconfigurableBase protected Configuration getNewConf() { return new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java index 3265bed80c1ce..ada7c82150a12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java @@ -24,6 +24,7 @@ import org.junit.Before; import org.junit.After; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; import static org.junit.Assert.*; import org.slf4j.Logger; @@ -378,6 +379,21 @@ public void testBlockInvalidateLimitAfterReconfigured() datanodeManager.getBlockInvalidateLimit()); } + @Test + public void testEnableParallelLoadAfterReconfigured() + throws ReconfigurationException { + final NameNode nameNode = cluster.getNameNode(); + + // By default, enableParallelLoad is false + assertEquals(false, FSImageFormatProtobuf.getEnableParallelLoad()); + + nameNode.reconfigureProperty(DFS_IMAGE_PARALLEL_LOAD_KEY, + Boolean.toString(true)); + + // After reconfigured, enableParallelLoad is true + assertEquals(true, FSImageFormatProtobuf.getEnableParallelLoad()); + } + @After public void shutDown() throws IOException { if (cluster != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 278db064fdd7a..736d66f2f4a78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; @@ -429,11 +430,12 @@ public void testNameNodeGetReconfigurableProperties() throws IOException { final List outs = Lists.newArrayList(); final List errs = Lists.newArrayList(); getReconfigurableProperties("namenode", address, outs, errs); - assertEquals(12, outs.size()); + assertEquals(13, outs.size()); assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(1)); assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(2)); assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(3)); - assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(4)); + assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(4)); + assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(5)); assertEquals(errs.size(), 0); } From 83db4658b2cbada1876859784443ec96be62dd37 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 19 Feb 2021 11:09:10 +0900 Subject: [PATCH 014/666] Revert "HADOOP-16870. Use spotbugs-maven-plugin instead of findbugs-maven-plugin (#2454)" This reverts commit 4cf35315838a6e65f87ed64aaa8f1d31594c7fcd. --- BUILDING.txt | 2 +- dev-support/Jenkinsfile | 6 +++--- dev-support/bin/hadoop.sh | 4 ++-- dev-support/docker/Dockerfile | 12 ++---------- dev-support/docker/Dockerfile_aarch64 | 12 ++---------- .../hadoop-cos/pom.xml | 5 +++-- hadoop-common-project/hadoop-auth/pom.xml | 4 ++-- hadoop-common-project/hadoop-kms/pom.xml | 4 ++-- hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++-- hadoop-common-project/hadoop-nfs/pom.xml | 4 ++-- hadoop-common-project/hadoop-registry/pom.xml | 5 +++-- hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 4 ++-- .../hadoop-mapreduce-client/pom.xml | 5 +++-- .../hadoop-mapreduce-examples/pom.xml | 5 +++-- hadoop-mapreduce-project/pom.xml | 12 ++++++++---- hadoop-project-dist/pom.xml | 5 +++-- hadoop-project/pom.xml | 17 +++++++++++++++++ hadoop-tools/hadoop-aliyun/pom.xml | 5 +++-- hadoop-tools/hadoop-archive-logs/pom.xml | 5 +++-- hadoop-tools/hadoop-aws/pom.xml | 5 +++-- hadoop-tools/hadoop-azure/pom.xml | 5 +++-- hadoop-tools/hadoop-datajoin/pom.xml | 5 +++-- hadoop-tools/hadoop-fs2img/pom.xml | 5 +++-- hadoop-tools/hadoop-gridmix/pom.xml | 5 +++-- hadoop-tools/hadoop-kafka/pom.xml | 5 +++-- hadoop-tools/hadoop-openstack/pom.xml | 5 +++-- hadoop-tools/hadoop-rumen/pom.xml | 5 +++-- hadoop-tools/hadoop-sls/pom.xml | 5 +++-- hadoop-tools/hadoop-streaming/pom.xml | 5 +++-- .../pom.xml | 4 ++-- hadoop-yarn-project/hadoop-yarn/pom.xml | 5 +++-- hadoop-yarn-project/pom.xml | 7 +++++-- pom.xml | 18 ------------------ 33 files changed, 106 insertions(+), 98 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 9bbb6dbf891a9..821056f81de7f 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -139,7 +139,7 @@ Maven build goals: * Compile : mvn compile [-Pnative] * Run tests : mvn test [-Pnative] [-Pshelltest] * Create JAR : mvn package - * Run spotbugs : mvn compile spotbugs:spotbugs + * Run findbugs : mvn compile findbugs:findbugs * Run checkstyle : mvn compile checkstyle:checkstyle * Install JAR in M2 cache : mvn install * Deploy JAR to Maven repo : mvn deploy diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 
5d1b775a8aea1..1703d0153bfb6 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -120,11 +120,11 @@ pipeline { YETUS_ARGS+=("--proclimit=5500") YETUS_ARGS+=("--dockermemlimit=22g") - # -1 spotbugs issues that show up prior to the patch being applied - YETUS_ARGS+=("--spotbugs-strict-precheck") + # -1 findbugs issues that show up prior to the patch being applied + YETUS_ARGS+=("--findbugs-strict-precheck") # rsync these files back into the archive dir - YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,spotbugsXml.xml") + YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,findbugsXml.xml") # URL for user-side presentation in reports and such to our artifacts # (needs to match the archive bits below) diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh index beebea8c97f6b..3343014aae8bb 100755 --- a/dev-support/bin/hadoop.sh +++ b/dev-support/bin/hadoop.sh @@ -482,7 +482,7 @@ function personality_file_tests fi if [[ ${filename} =~ \.java$ ]]; then - add_test spotbugs + add_test findbugs fi } @@ -550,7 +550,7 @@ function shadedclient_rebuild echo_and_redirect "${logfile}" \ "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \ "${modules[@]}" \ - -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true + -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dfindbugs.skip=true count=$("${GREP}" -c '\[ERROR\]' "${logfile}") if [[ ${count} -gt 0 ]]; then diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 20cbbf8a04246..cf442902d3d9c 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -44,6 +44,7 @@ RUN apt-get -q update \ cmake \ curl \ doxygen \ + findbugs \ fuse \ g++ \ gcc \ @@ -90,16 +91,7 @@ RUN apt-get -q update \ ENV MAVEN_HOME /usr # JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 - -####### -# Install SpotBugs 4.0.6 -####### -RUN mkdir -p /opt/spotbugs \ - && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.0.6/spotbugs-4.0.6.tgz \ - -o /opt/spotbugs.tgz \ - && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ - && chmod +x /opt/spotbugs/bin/* -ENV SPOTBUGS_HOME /opt/spotbugs +ENV FINDBUGS_HOME /usr ####### # Install Boost 1.72 (1.71 ships with Focal) diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 8f95df8ee8ce4..bdf9e0c7e59f6 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -48,6 +48,7 @@ RUN apt-get -q update \ cmake \ curl \ doxygen \ + findbugs \ fuse \ g++ \ gcc \ @@ -94,16 +95,7 @@ RUN apt-get -q update \ ENV MAVEN_HOME /usr # JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 - -####### -# Install SpotBugs 4.0.6 -####### -RUN mkdir -p /opt/spotbugs \ - && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.0.6/spotbugs-4.0.6.tgz \ - -o /opt/spotbugs.tgz \ - && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ - && chmod +x /opt/spotbugs/bin/* -ENV SPOTBUGS_HOME /opt/spotbugs +ENV FINDBUGS_HOME /usr ####### # Install Boost 1.72 (1.71 ships with Focal) diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml index fa47e354c7998..d18b09f450408 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml @@ -64,9 +64,10 @@ - com.github.spotbugs - spotbugs-maven-plugin 
+ org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 8e8526c7450e0..10e0b9c825677 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -237,8 +237,8 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 9de8b9caf6e68..338af127f8bd7 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -237,8 +237,8 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index c292aebbe3656..c76abf750b78d 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -53,8 +53,8 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index f5de8407a0920..b3c727910e7c2 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -107,8 +107,8 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index 0058832e6110b..5e3e2edcb9690 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -163,9 +163,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${project.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 1916ef0e3b7f6..de7112270883a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -337,8 +337,8 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 55940ae4ff744..0d76285cded61 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -157,9 +157,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${mr.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index 48cf27efe437f..ec3a0514b26fc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -138,9 +138,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${mr.examples.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-mapreduce-project/pom.xml 
b/hadoop-mapreduce-project/pom.xml index 09b4d42606118..5ec1db63240ec 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -178,9 +178,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${mr.basedir}/dev-support/findbugs-exclude.xml Max @@ -298,9 +299,12 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + + 2.3.2 + true true diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 10a0526519de2..410ec8635eecb 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -89,10 +89,11 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml + true 2048 diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 482ecb64ed186..c3a52d32af870 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -101,6 +101,7 @@ 3.5.6 4.2.0 3.0.5 + 4.0.6 2.1.7 27.0-jre @@ -1853,6 +1854,18 @@ maven-war-plugin ${maven-war-plugin.version} + + org.codehaus.mojo + findbugs-maven-plugin + ${findbugs.version} + + + com.github.spotbugs + spotbugs + ${spotbugs.version} + + + org.codehaus.mojo make-maven-plugin @@ -2086,6 +2099,10 @@ + + org.codehaus.mojo + findbugs-maven-plugin + org.apache.maven.plugins maven-antrun-plugin diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml index 8a68d9b4f7db6..284c6517821a8 100644 --- a/hadoop-tools/hadoop-aliyun/pom.xml +++ b/hadoop-tools/hadoop-aliyun/pom.xml @@ -58,9 +58,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml index 3caa26c52f884..10bc9a0833209 100644 --- a/hadoop-tools/hadoop-archive-logs/pom.xml +++ b/hadoop-tools/hadoop-archive-logs/pom.xml @@ -194,9 +194,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 0cab5ada2169d..f6d70230944fc 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -399,9 +399,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index d5d0f1fcd7303..4176305c32c53 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -50,9 +50,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml index ec77281b66350..e26fa5dc33352 100644 --- a/hadoop-tools/hadoop-datajoin/pom.xml +++ b/hadoop-tools/hadoop-datajoin/pom.xml @@ -108,9 +108,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml index 9a273621cfa9d..6f268ca616f91 100644 --- a/hadoop-tools/hadoop-fs2img/pom.xml +++ b/hadoop-tools/hadoop-fs2img/pom.xml @@ -87,9 +87,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + 
org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml index f383ecad64150..afa9cf5d3673e 100644 --- a/hadoop-tools/hadoop-gridmix/pom.xml +++ b/hadoop-tools/hadoop-gridmix/pom.xml @@ -123,9 +123,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml index a6f854a1ec272..8789b13c51a49 100644 --- a/hadoop-tools/hadoop-kafka/pom.xml +++ b/hadoop-tools/hadoop-kafka/pom.xml @@ -39,9 +39,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true Max diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml index e1065883feb9e..a4624c54e6871 100644 --- a/hadoop-tools/hadoop-openstack/pom.xml +++ b/hadoop-tools/hadoop-openstack/pom.xml @@ -66,9 +66,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml index fd3b5c9a1b8fa..6afc51c1f21c8 100644 --- a/hadoop-tools/hadoop-rumen/pom.xml +++ b/hadoop-tools/hadoop-rumen/pom.xml @@ -102,9 +102,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml index 5bb5a415d90f6..ce9197cd21dcc 100644 --- a/hadoop-tools/hadoop-sls/pom.xml +++ b/hadoop-tools/hadoop-sls/pom.xml @@ -108,9 +108,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml index f03eb001450b4..a419e10d753ff 100644 --- a/hadoop-tools/hadoop-streaming/pom.xml +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -129,9 +129,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index 12b0112680320..072e70a894d35 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -389,8 +389,8 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin true diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index dff9a2b08a9f9..c8d9ddd9d007d 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -39,9 +39,10 @@ - com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + true true ${yarn.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index ae40fef3f716b..d05d1df2c548f 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -204,9 +204,12 @@ - 
com.github.spotbugs - spotbugs-maven-plugin + org.codehaus.mojo + findbugs-maven-plugin + + 2.3.2 + true true diff --git a/pom.xml b/pom.xml index 4ec386979d7e0..7e94cfb8ad0b0 100644 --- a/pom.xml +++ b/pom.xml @@ -113,8 +113,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 3.1.0 8.29 1.4.3 - 4.0.6 - 4.0.4 bash @@ -344,18 +342,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x dependency-check-maven ${dependency-check-maven.version} - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - - com.github.spotbugs - spotbugs - ${spotbugs.version} - - - @@ -457,10 +443,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x dependency-check-maven ${dependency-check-maven.version} - - com.github.spotbugs - spotbugs-maven-plugin - From d938f4b96d5bd862b814aac5f1a9ac8c54a3cb5f Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 19 Feb 2021 14:44:58 +0900 Subject: [PATCH 015/666] HDFS-15835. Erasure coding: Add/remove logs for the better readability/debugging. Contributed by Bhavik Patel. --- .../hdfs/server/namenode/ErasureCodingPolicyManager.java | 5 +++-- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 1 - .../hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 5 +++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 73f5596eabf36..d5fdba84633ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -343,6 +343,7 @@ public synchronized ErasureCodingPolicy addPolicy( policiesByName.values().toArray(new ErasureCodingPolicyInfo[0]); allPersistedPolicies.put(policy.getId(), new ErasureCodingPolicyInfo(policy)); + LOG.info("Added erasure coding policy " + policy); return policy; } @@ -414,7 +415,7 @@ public synchronized boolean disablePolicy(String name) { enabledPolicies = enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]); info.setState(ErasureCodingPolicyState.DISABLED); - LOG.info("Disable the erasure coding policy " + name); + LOG.info("Disabled the erasure coding policy " + name); allPersistedPolicies.put(info.getPolicy().getId(), createPolicyInfo(info.getPolicy(), ErasureCodingPolicyState.DISABLED)); @@ -448,7 +449,7 @@ public synchronized boolean enablePolicy(String name) { enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]); allPersistedPolicies.put(ecPolicy.getId(), createPolicyInfo(info.getPolicy(), ErasureCodingPolicyState.ENABLED)); - LOG.info("Enable the erasure coding policy " + name); + LOG.info("Enabled the erasure coding policy " + name); return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index d13e21117efeb..cf836356d0063 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8226,7 +8226,6 @@ 
boolean disableErasureCodingPolicy(String ecPolicyName, checkOperation(OperationCategory.WRITE); checkErasureCodingSupported(operationName); boolean success = false; - LOG.info("Disable the erasure coding policy " + ecPolicyName); try { writeLock(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index f4ab8f9c8a8ef..70b12b3e198ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2258,10 +2258,10 @@ public void setErasureCodingPolicy(String src, String ecPolicyName) try { if (ecPolicyName == null) { ecPolicyName = defaultECPolicyName; - LOG.trace("No policy name is specified, " + + LOG.debug("No policy name is specified, " + "set the default policy name instead"); } - LOG.trace("Set erasure coding policy " + ecPolicyName + " on " + src); + LOG.debug("Set erasure coding policy {} on {}", ecPolicyName, src); namesystem.setErasureCodingPolicy(src, ecPolicyName, cacheEntry != null); success = true; } finally { @@ -2491,6 +2491,7 @@ public void unsetErasureCodingPolicy(String src) throws IOException { } boolean success = false; try { + LOG.debug("Unset erasure coding policy on {}", src); namesystem.unsetErasureCodingPolicy(src, cacheEntry != null); success = true; } finally { From e795b8fbae9d03f9998d51765279ba5fa168fa4d Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 19 Feb 2021 12:01:31 +0100 Subject: [PATCH 016/666] YARN-10635. CSMapping rule can return paths with empty parts. Contributed by Gergely Pollak. --- .../placement/CSMappingPlacementRule.java | 33 +++++++++-- .../placement/MappingQueuePath.java | 15 +++++ .../placement/MappingRuleActions.java | 6 +- .../placement/TestCSMappingPlacementRule.java | 55 ++++++++++++++----- 4 files changed, 89 insertions(+), 20 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java index 28d2cb6131976..821d055ae1b7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java @@ -194,6 +194,7 @@ private void setupGroupsForVariableContext(VariableContext vctx, String user) String secondaryGroup = null; Iterator it = groupsSet.iterator(); String primaryGroup = it.next(); + while (it.hasNext()) { String group = it.next(); if (this.queueManager.getQueue(group) != null) { @@ -203,8 +204,7 @@ private void setupGroupsForVariableContext(VariableContext vctx, String user) } if (secondaryGroup == null && LOG.isDebugEnabled()) { - LOG.debug("User {} is not associated with any Secondary " + - "Group. 
Hence it may use the 'default' queue", user); + LOG.debug("User {} is not associated with any Secondary group", user); } vctx.put("%primary_group", primaryGroup); @@ -223,7 +223,15 @@ private VariableContext createVariableContext( //To place queues specifically to default, users must use root.default if (!asc.getQueue().equals(YarnConfiguration.DEFAULT_QUEUE_NAME)) { vctx.put("%specified", asc.getQueue()); + } else { + //Adding specified as empty will prevent it to be undefined and it won't + //try to place the application to a queue named '%specified', queue path + //validation will reject the empty path or the path with empty parts, + //so we sill still hit the fallback action of this rule if no queue + //is specified + vctx.put("%specified", ""); } + vctx.put("%application", asc.getApplicationName()); vctx.put("%default", "root.default"); try { @@ -239,6 +247,12 @@ private VariableContext createVariableContext( private String validateAndNormalizeQueue( String queueName, boolean allowCreate) throws YarnException { MappingQueuePath path = new MappingQueuePath(queueName); + + if (path.hasEmptyPart()) { + throw new YarnException("Invalid path returned by rule: '" + + queueName + "'"); + } + String leaf = path.getLeafName(); String parent = path.getParent(); @@ -335,14 +349,19 @@ private MappingRuleResult evaluateRule( MappingRule rule, VariableContext variables) { MappingRuleResult result = rule.evaluate(variables); + if (LOG.isDebugEnabled()) { + LOG.debug("Evaluated rule '{}' with result: '{}'", rule, result); + } + if (result.getResult() == MappingRuleResultType.PLACE) { try { result.updateNormalizedQueue(validateAndNormalizeQueue( result.getQueue(), result.isCreateAllowed())); } catch (Exception e) { - LOG.info("Cannot place to queue '{}' returned by mapping rule. " + - "Reason: {}", result.getQueue(), e.getMessage()); result = rule.getFallback(); + LOG.info("Cannot place to queue '{}' returned by mapping rule. " + + "Reason: '{}' Fallback operation: '{}'", + result.getQueue(), e.getMessage(), result); } } @@ -451,6 +470,12 @@ public ApplicationPlacementContext getPlacementForApp( } } + if (LOG.isDebugEnabled()) { + LOG.debug("Placement final result '{}' for application '{}'", + (ret == null ? "null" : ret.getFullQueuePath()), + asc.getApplicationId()); + } + return ret; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java index 2c018210f14e9..b1ccf70eda9e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java @@ -69,6 +69,21 @@ private void setFromFullPath(String fullPath) { } } + /** + * Simple helper method to determine if the path contains any empty parts. 
+ * @return true if there is at least one empty part of the path + */ + public boolean hasEmptyPart() { + String[] parts = getFullPath().split("\\."); + for (int i = 0; i < parts.length; i++) { + if (parts[i].equals("")) { + return true; + } + } + + return false; + } + /** * Getter for the parent part of the path. * @return Parent path of the queue, null if there is no parent. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java index 35d7276c78d5b..3f19dcdf4c996 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java @@ -66,13 +66,15 @@ public static class PlaceToQueueAction extends MappingRuleActionBase { * This method is the main logic of the action, it will replace all the * variables in the queuePattern with their respective values, then returns * a placementResult with the final queue name. + * * @param variables The variable context, which contains all the variables * @return The result of the action */ @Override public MappingRuleResult execute(VariableContext variables) { - String substituted = variables.replacePathVariables(queuePattern); - return MappingRuleResult.createPlacementResult(substituted, allowCreate); + String substituted = variables.replacePathVariables(queuePattern); + return MappingRuleResult.createPlacementResult( + substituted, allowCreate); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java index 69f56ec1e7873..f0b19c45d0ce9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java @@ -64,7 +64,7 @@ public class TestCSMappingPlacementRule { "alice", ImmutableSet.of("p_alice", "user", "developer"), "bob", ImmutableSet.of("p_bob", "user", "developer"), "charlie", ImmutableSet.of("p_charlie", "user", "tester"), - "dave", ImmutableSet.of("user", "tester"), + "dave", ImmutableSet.of("user"), "emily", ImmutableSet.of("user", "tester", "developer") ); @@ -90,6 +90,7 @@ private void createQueueHierarchy(CapacitySchedulerQueueManager queueManager) { .withQueue("root.disambiguous.deep.disambiuser.disambi") .withQueue("root.disambiguous.deep.group.developer") .withManagedParentQueue("root.disambiguous.deep.dman") + .withDynamicParentQueue("root.dynamic") .build(); when(queueManager.getQueue(isNull())).thenReturn(null); @@ -151,8 +152,9 @@ private 
ApplicationSubmissionContext createApp(String name) { private void assertReject(String message, CSMappingPlacementRule engine, ApplicationSubmissionContext asc, String user) { try { - engine.getPlacementForApp(asc, user); - fail(message); + ApplicationPlacementContext apc = engine.getPlacementForApp(asc, user); + fail("Unexpected queue result: " + apc.getFullQueuePath() + " - " + + message); } catch (YarnException e) { //To prevent PlacementRule chaining present in PlacementManager //when an application is rejected an exception is thrown to make sure @@ -483,14 +485,36 @@ private MappingRule createGroupMapping(String group, String queue) { } @Test - public void testGroupMatching() throws IOException { + public void testGroupTargetMatching() throws IOException { ArrayList rules = new ArrayList<>(); - rules.add(createGroupMapping("p_alice", "root.man.p_alice")); - rules.add(createGroupMapping("developer", "root.man.developer")); + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("alice"), + (new MappingRuleActions.PlaceToQueueAction( + "root.man.%primary_group", true)) + .setFallbackReject())); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("bob"), + (new MappingRuleActions.PlaceToQueueAction( + "root.dynamic.%secondary_group.%user", true)) + .setFallbackReject())); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("charlie"), + (new MappingRuleActions.PlaceToQueueAction( + "root.man.%secondary_group", true)) + .setFallbackReject())); - //everybody is in the user group, this should catch all - rules.add(createGroupMapping("user", "root.man.user")); + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("dave"), + (new MappingRuleActions.PlaceToQueueAction( + "root.dynamic.%secondary_group.%user", true)) + .setFallbackReject())); CSMappingPlacementRule engine = setupEngine(true, rules); ApplicationSubmissionContext app = createApp("app"); @@ -499,12 +523,15 @@ public void testGroupMatching() throws IOException { "Alice should be placed to root.man.p_alice based on her primary group", engine, app, "alice", "root.man.p_alice"); assertPlace( - "Bob should be placed to root.man.developer based on his developer " + - "group", engine, app, "bob", "root.man.developer"); - assertPlace( - "Charlie should be placed to root.man.user because he is not a " + - "developer nor in the p_alice group", engine, app, "charlie", - "root.man.user"); + "Bob should be placed to root.dynamic.developer.bob based on his " + + "secondary group, since we have a queue named 'developer', bob " + + "identifies as a user with secondary_group 'developer'", engine, app, + "bob", "root.dynamic.developer.bob"); + assertReject("Charlie should get rejected because he neither of his" + + "groups have an ambiguous queue, so effectively he has no secondary " + + "group", engine, app, "charlie"); + assertReject("Dave should get rejected because he has no secondary group", + engine, app, "dave"); } void assertConfigTestResult(List rules) { From 08217e8921df173d9baa3fa6a4e1e0eecb92b1c3 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 19 Feb 2021 13:57:30 +0100 Subject: [PATCH 017/666] YARN-10636. CS Auto Queue creation should reject submissions with empty path parts. Contributed by Gergely Pollak. 
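The two queue-path changes in this part of the series (YARN-10635 above and YARN-10636 below) both come down to the same rule: a queue path such as "root.", ".users" or "root..a" contains an empty segment and must be rejected before placement or auto-queue creation. The following is a minimal standalone sketch of that check. It mirrors the split-on-"." logic added in MappingQueuePath.hasEmptyPart() and CapacityScheduler.getOrCreateQueueFromPlacementContext() in the diffs, but the class and method names here are hypothetical and it adds an explicit guard for leading/trailing dots, since Java's String#split drops trailing empty strings; it is an illustrative sketch, not code from the patches.

// Illustrative sketch only: reject queue paths that contain an empty part.
public final class QueuePathCheck {

  static boolean hasEmptyPart(String path) {
    // String#split discards trailing empty strings ("root." -> ["root"]),
    // so check the ends of the path explicitly before splitting.
    if (path.isEmpty() || path.startsWith(".") || path.endsWith(".")) {
      return true;
    }
    for (String part : path.split("\\.")) {
      if (part.isEmpty()) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(hasEmptyPart("root.users.alice")); // false
    System.out.println(hasEmptyPart("root."));            // true
    System.out.println(hasEmptyPart("root..a"));          // true
  }
}

With this kind of guard in place, a submission whose resolved path has an empty segment falls through to the rule's fallback (YARN-10635) or is rejected outright instead of crashing the scheduler (YARN-10636).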
--- .../scheduler/capacity/CapacityScheduler.java | 15 +++++- ...estCapacitySchedulerAutoQueueCreation.java | 49 +++++++++++++++++++ 2 files changed, 62 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 21d62480fe0e0..36f831dca9459 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -962,6 +962,17 @@ private CSQueue getOrCreateQueueFromPlacementContext(ApplicationId if (placementContext == null) { fallbackContext = CSQueueUtils.extractQueuePath(queueName); } + + //we need to make sure there is no empty path parts present + String path = fallbackContext.getFullQueuePath(); + String[] pathParts = path.split("\\."); + for (int i = 0; i < pathParts.length; i++) { + if ("".equals(pathParts[i])) { + LOG.error("Application submitted to invalid path: '{}'", path); + return null; + } + } + if (fallbackContext.hasParentQueue()) { try { return autoCreateLeafQueue(fallbackContext); @@ -1022,8 +1033,8 @@ private void addApplication(ApplicationId applicationId, String queueName, } //Could be a potential auto-created leaf queue - CSQueue queue = getOrCreateQueueFromPlacementContext(applicationId, user, - queueName, placementContext, false); + CSQueue queue = getOrCreateQueueFromPlacementContext( + applicationId, user, queueName, placementContext, false); if (queue == null) { String message; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java index 4dc0fab16106c..be41a3307cd95 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @@ -89,6 +90,7 @@ .NO_LABEL; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueueUtils.EPSILON; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.ROOT; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -535,6 +537,53 @@ public void testParentQueueUpdateInQueueMappingFailsAfterAutoCreation() } } + + @Test + public void testAutoQueueCreationFailsForEmptyPathWithAQCAndWeightMode() + throws Exception { + if (mockRM != null) { + mockRM.stop(); + } + + //We need a special configuration we only need a V2 queue auto creation + //And weight mode, to allow dynamic auto queue creation for root + CapacitySchedulerConfiguration conf = setupSchedulerConfiguration(); + conf.setAutoQueueCreationV2Enabled(ROOT, true); + conf.setCapacity("root.default", "1w"); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + + //Just a regular mockRM and CapacityScheduler instance + MockRM newMockRM = new MockRM(conf); + newMockRM.start(); + ((CapacityScheduler) newMockRM.getResourceScheduler()).start(); + + CapacityScheduler newCS = + (CapacityScheduler) newMockRM.getResourceScheduler(); + + try { + //submitting to root..user, this should fail WITHOUT crashing the RM + submitApp(newCS, USER0, "user", "root."); + + RMContext rmContext = mock(RMContext.class); + when(rmContext.getDispatcher()).thenReturn(dispatcher); + newCS.setRMContext(rmContext); + + ApplicationId appId = BuilderUtils.newApplicationId(1, 1); + SchedulerEvent addAppEvent = new AppAddedSchedulerEvent( + appId, "user", "root."); + newCS.handle(addAppEvent); + + RMAppEvent event = new RMAppEvent(appId, RMAppEventType.APP_REJECTED, + "error"); + dispatcher.spyOnNextEvent(event, 10000); + } finally { + ((CapacityScheduler) newMockRM.getResourceScheduler()).stop(); + newMockRM.stop(); + } + } + + /** * This test case checks if a mapping rule can put an application to an auto * created queue even if an other queue with the same leaf name already From ad8b0522f06eb983ba36c6079a2d2a38b1bfb996 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Fri, 19 Feb 2021 22:29:10 +0000 Subject: [PATCH 018/666] YARN-10501. Can't remove all node labels after add node label without nodemanager port. Contributed by caozhiqiang. --- .../nodelabels/CommonNodeLabelsManager.java | 47 +++++++++++++++++++ .../TestCommonNodeLabelsManager.java | 18 +++++++ 2 files changed, 65 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index 4fd4bd6bd72f4..8fd0315fa8577 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -559,6 +559,50 @@ private void replaceNodeForLabels(NodeId node, Set oldLabels, addNodeToLabels(node, newLabels); } + private void addLabelsToNodeInHost(NodeId node, Set labels) + throws IOException { + Host host = nodeCollections.get(node.getHost()); + if (null == host) { + throw new IOException("Cannot add labels to a host that " + + "does not exist. 
Create the host before adding labels to it."); + } + Node nm = host.nms.get(node); + if (nm != null) { + Node newNm = nm.copy(); + if (newNm.labels == null) { + newNm.labels = + Collections.newSetFromMap(new ConcurrentHashMap()); + } + newNm.labels.addAll(labels); + host.nms.put(node, newNm); + } + } + + protected void removeLabelsFromNodeInHost(NodeId node, Set labels) + throws IOException { + Host host = nodeCollections.get(node.getHost()); + if (null == host) { + throw new IOException("Cannot remove labels from a host that " + + "does not exist. Create the host before adding labels to it."); + } + Node nm = host.nms.get(node); + if (nm != null) { + if (nm.labels == null) { + nm.labels = new HashSet(); + } else { + nm.labels.removeAll(labels); + } + } + } + + private void replaceLabelsForNode(NodeId node, Set oldLabels, + Set newLabels) throws IOException { + if(oldLabels != null) { + removeLabelsFromNodeInHost(node, oldLabels); + } + addLabelsToNodeInHost(node, newLabels); + } + @SuppressWarnings("unchecked") protected void internalUpdateLabelsOnNodes( Map> nodeToLabels, NodeLabelUpdateOperation op) @@ -597,10 +641,12 @@ protected void internalUpdateLabelsOnNodes( break; case REPLACE: replaceNodeForLabels(nodeId, host.labels, labels); + replaceLabelsForNode(nodeId, host.labels, labels); host.labels.clear(); host.labels.addAll(labels); for (Node node : host.nms.values()) { replaceNodeForLabels(node.nodeId, node.labels, labels); + replaceLabelsForNode(node.nodeId, node.labels, labels); node.labels = null; } break; @@ -625,6 +671,7 @@ protected void internalUpdateLabelsOnNodes( case REPLACE: oldLabels = getLabelsByNode(nodeId); replaceNodeForLabels(nodeId, oldLabels, labels); + replaceLabelsForNode(nodeId, oldLabels, labels); if (nm.labels == null) { nm.labels = new HashSet(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java index e86cca4e245a1..d9f9389866e43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java @@ -616,4 +616,22 @@ public void testGetNodeLabelsInfo() throws IOException { toNodeId("n1"), toSet(NodeLabel.newInstance("p2", true)), toNodeId("n2"), toSet(NodeLabel.newInstance("p3", false)))); } + + @Test(timeout = 5000) + public void testRemoveNodeLabelsInfo() throws IOException { + mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", true))); + mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p2", true))); + mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p1"))); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2"))); + + Map> labelsToNodes = mgr.getLabelsToNodes(); + assertLabelsToNodesEquals( + labelsToNodes, + ImmutableMap.of( + "p2", toSet(toNodeId("n1:1"), toNodeId("n1:0")))); + + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), new HashSet())); + Map> labelsToNodes2 = mgr.getLabelsToNodes(); + Assert.assertEquals(labelsToNodes2.get("p2"), null); + } } From 97f73923d32273554b265e2f0cd7b86806f6b105 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Sat, 20 Feb 2021 17:18:23 +0900 Subject: [PATCH 019/666] HADOOP-16748. 
Migrate to Python 3 and upgrade Yetus to 0.13.0 (#1738) - Upgrade Yetus to 0.13.0 to support Python 3 for the release scripts. - Removed determine-flaky-tests-hadoop.py. - Temporarily disabled shelldocs check due to YETUS-1099. Reviewed-by: Inigo Goiri Reviewed-by: Mingliang Liu --- dev-support/Jenkinsfile | 23 +- dev-support/bin/checkcompatibility.py | 37 +-- dev-support/bin/yetus-wrapper | 2 +- dev-support/determine-flaky-tests-hadoop.py | 245 -------------------- dev-support/docker/Dockerfile | 35 +-- dev-support/docker/Dockerfile_aarch64 | 35 +-- 6 files changed, 40 insertions(+), 337 deletions(-) delete mode 100755 dev-support/determine-flaky-tests-hadoop.py diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 1703d0153bfb6..6841ed30a79cc 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -35,7 +35,7 @@ pipeline { DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" YETUS='yetus' // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='6ab19e71eaf3234863424c6f684b34c1d3dcc0ce' + YETUS_VERSION='rel/0.13.0' } parameters { @@ -60,7 +60,7 @@ pipeline { stage ('precommit-run') { steps { withCredentials( - [usernamePassword(credentialsId: 'apache-hadoop-at-github.com', + [usernamePassword(credentialsId: '683f5dcf-5552-4b28-9fb1-6a6b77cf53dd', passwordVariable: 'GITHUB_TOKEN', usernameVariable: 'GITHUB_USER'), usernamePassword(credentialsId: 'hadoopqa-at-asf-jira', @@ -133,9 +133,6 @@ pipeline { # plugins to enable YETUS_ARGS+=("--plugins=all") - # use Hadoop's bundled shelldocs - YETUS_ARGS+=("--shelldocs=${WORKSPACE}/${SOURCEDIR}/dev-support/bin/shelldocs") - # don't let these tests cause -1s because we aren't really paying that # much attention to them YETUS_ARGS+=("--tests-filter=checkstyle") @@ -152,9 +149,6 @@ pipeline { # help keep the ASF boxes clean YETUS_ARGS+=("--sentinel") - # use emoji vote so it is easier to find the broken line - YETUS_ARGS+=("--github-use-emoji-vote") - # test with Java 8 and 11 YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64") YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64") @@ -174,6 +168,19 @@ pipeline { post { always { script { + // Publish status if it was missed (YETUS-1059) + withCredentials( + [usernamePassword(credentialsId: '683f5dcf-5552-4b28-9fb1-6a6b77cf53dd', + passwordVariable: 'GITHUB_TOKEN', + usernameVariable: 'GITHUB_USER')]) { + sh '''#!/usr/bin/env bash + YETUS_ARGS+=("--github-token=${GITHUB_TOKEN}") + YETUS_ARGS+=("--patch-dir=${WORKSPACE}/${PATCHDIR}") + TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/github-status-recovery.sh" + /usr/bin/env bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ${EXTRA_ARGS} || true + ''' + } + // Yetus output archiveArtifacts "${env.PATCHDIR}/**" // Publish the HTML report so that it can be looked at diff --git a/dev-support/bin/checkcompatibility.py b/dev-support/bin/checkcompatibility.py index ad1e9cbe47ff2..3db36154ef9c5 100755 --- a/dev-support/bin/checkcompatibility.py +++ b/dev-support/bin/checkcompatibility.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -30,33 +30,16 @@ import shutil import subprocess import sys -import urllib2 -try: - import argparse -except ImportError: - sys.stderr.write("Please install argparse, e.g. 
via `pip install argparse`.") - sys.exit(2) +import urllib.request +import argparse # Various relative paths REPO_DIR = os.getcwd() def check_output(*popenargs, **kwargs): - r"""Run command with arguments and return its output as a byte string. - Backported from Python 2.7 as it's implemented as pure python on stdlib. - >>> check_output(['/usr/bin/python', '--version']) - Python 2.6.2 - """ - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, _ = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - error = subprocess.CalledProcessError(retcode, cmd) - error.output = output - raise error - return output + """ Run command with arguments and return its output as a string. """ + return subprocess.check_output(*popenargs, **kwargs, encoding='utf-8') + def get_repo_dir(): """ Return the path to the top of the repo. """ @@ -139,7 +122,7 @@ def checkout_java_acc(force): url = "https://github.com/lvc/japi-compliance-checker/archive/1.8.tar.gz" scratch_dir = get_scratch_dir() path = os.path.join(scratch_dir, os.path.basename(url)) - jacc = urllib2.urlopen(url) + jacc = urllib.request.urlopen(url) with open(path, 'wb') as w: w.write(jacc.read()) @@ -194,7 +177,7 @@ def run_java_acc(src_name, src_jars, dst_name, dst_jars, annotations): annotations_path = os.path.join(get_scratch_dir(), "annotations.txt") with file(annotations_path, "w") as f: for ann in annotations: - print >>f, ann + print(ann, file=f) args += ["-annotations-list", annotations_path] subprocess.check_call(args) @@ -264,8 +247,8 @@ def main(): parser.add_argument("--skip-build", action="store_true", help="Skip building the projects.") - parser.add_argument("src_rev", nargs=1, help="Source revision.") - parser.add_argument("dst_rev", nargs="?", default="HEAD", + parser.add_argument("src_rev", nargs=1, type=str, help="Source revision.") + parser.add_argument("dst_rev", nargs="?", type=str, default="HEAD", help="Destination revision. " + "If not specified, will use HEAD.") diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper index bca2316ae6784..8532d1749701b 100755 --- a/dev-support/bin/yetus-wrapper +++ b/dev-support/bin/yetus-wrapper @@ -77,7 +77,7 @@ WANTED="$1" shift ARGV=("$@") -HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.10.0} +HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.13.0} BIN=$(yetus_abs "${BASH_SOURCE-$0}") BINDIR=$(dirname "${BIN}") diff --git a/dev-support/determine-flaky-tests-hadoop.py b/dev-support/determine-flaky-tests-hadoop.py deleted file mode 100755 index 8644299bba4a2..0000000000000 --- a/dev-support/determine-flaky-tests-hadoop.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Given a jenkins test job, this script examines all runs of the job done -# within specified period of time (number of days prior to the execution -# time of this script), and reports all failed tests. -# -# The output of this script includes a section for each run that has failed -# tests, with each failed test name listed. -# -# More importantly, at the end, it outputs a summary section to list all failed -# tests within all examined runs, and indicate how many runs a same test -# failed, and sorted all failed tests by how many runs each test failed. -# -# This way, when we see failed tests in PreCommit build, we can quickly tell -# whether a failed test is a new failure, or it failed before and how often it -# failed, so to have idea whether it may just be a flaky test. -# -# Of course, to be 100% sure about the reason of a test failure, closer look -# at the failed test for the specific run is necessary. -# -import sys -import platform -sysversion = sys.hexversion -onward30 = False -if sysversion < 0x020600F0: - sys.exit("Minimum supported python version is 2.6, the current version is " + - "Python" + platform.python_version()) - -if sysversion == 0x030000F0: - sys.exit("There is a known bug with Python" + platform.python_version() + - ", please try a different version"); - -if sysversion < 0x03000000: - import urllib2 -else: - onward30 = True - import urllib.request - -import datetime -import json as simplejson -import logging -from optparse import OptionParser -import time - -# Configuration -DEFAULT_JENKINS_URL = "https://builds.apache.org" -DEFAULT_JOB_NAME = "Hadoop-Common-trunk" -DEFAULT_NUM_PREVIOUS_DAYS = 14 -DEFAULT_TOP_NUM_FAILED_TEST = -1 - -SECONDS_PER_DAY = 86400 - -# total number of runs to examine -numRunsToExamine = 0 - -#summary mode -summary_mode = False - -#total number of errors -error_count = 0 - -""" Parse arguments """ -def parse_args(): - parser = OptionParser() - parser.add_option("-J", "--jenkins-url", type="string", - dest="jenkins_url", help="Jenkins URL", - default=DEFAULT_JENKINS_URL) - parser.add_option("-j", "--job-name", type="string", - dest="job_name", help="Job name to look at", - default=DEFAULT_JOB_NAME) - parser.add_option("-n", "--num-days", type="int", - dest="num_prev_days", help="Number of days to examine", - default=DEFAULT_NUM_PREVIOUS_DAYS) - parser.add_option("-t", "--top", type="int", - dest="num_failed_tests", - help="Summary Mode, only show top number of failed tests", - default=DEFAULT_TOP_NUM_FAILED_TEST) - - (options, args) = parser.parse_args() - if args: - parser.error("unexpected arguments: " + repr(args)) - return options - -""" Load data from specified url """ -def load_url_data(url): - if onward30: - ourl = urllib.request.urlopen(url) - codec = ourl.info().get_param('charset') - content = ourl.read().decode(codec) - data = simplejson.loads(content, strict=False) - else: - ourl = urllib2.urlopen(url) - data = simplejson.load(ourl, strict=False) - return data - -""" List all builds of the target project. """ -def list_builds(jenkins_url, job_name): - global summary_mode - url = "%(jenkins)s/job/%(job_name)s/api/json?tree=builds[url,result,timestamp]" % dict( - jenkins=jenkins_url, - job_name=job_name) - - try: - data = load_url_data(url) - - except: - if not summary_mode: - logging.error("Could not fetch: %s" % url) - error_count += 1 - raise - return data['builds'] - -""" Find the names of any tests which failed in the given build output URL. 
""" -def find_failing_tests(testReportApiJson, jobConsoleOutput): - global summary_mode - global error_count - ret = set() - try: - data = load_url_data(testReportApiJson) - - except: - if not summary_mode: - logging.error(" Could not open testReport, check " + - jobConsoleOutput + " for why it was reported failed") - error_count += 1 - return ret - - for suite in data['suites']: - for cs in suite['cases']: - status = cs['status'] - errDetails = cs['errorDetails'] - if (status == 'REGRESSION' or status == 'FAILED' or (errDetails is not None)): - ret.add(cs['className'] + "." + cs['name']) - - if len(ret) == 0 and (not summary_mode): - logging.info(" No failed tests in testReport, check " + - jobConsoleOutput + " for why it was reported failed.") - return ret - -""" Iterate runs of specfied job within num_prev_days and collect results """ -def find_flaky_tests(jenkins_url, job_name, num_prev_days): - global numRunsToExamine - global summary_mode - all_failing = dict() - # First list all builds - builds = list_builds(jenkins_url, job_name) - - # Select only those in the last N days - min_time = int(time.time()) - SECONDS_PER_DAY * num_prev_days - builds = [b for b in builds if (int(b['timestamp']) / 1000) > min_time] - - # Filter out only those that failed - failing_build_urls = [(b['url'] , b['timestamp']) for b in builds - if (b['result'] in ('UNSTABLE', 'FAILURE'))] - - tnum = len(builds) - num = len(failing_build_urls) - numRunsToExamine = tnum - if not summary_mode: - logging.info(" THERE ARE " + str(num) + " builds (out of " + str(tnum) - + ") that have failed tests in the past " + str(num_prev_days) + " days" - + ((".", ", as listed below:\n")[num > 0])) - - for failed_build_with_time in failing_build_urls: - failed_build = failed_build_with_time[0]; - jobConsoleOutput = failed_build + "Console"; - testReport = failed_build + "testReport"; - testReportApiJson = testReport + "/api/json"; - - ts = float(failed_build_with_time[1]) / 1000. 
- st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') - if not summary_mode: - logging.info("===>%s" % str(testReport) + " (" + st + ")") - failing = find_failing_tests(testReportApiJson, jobConsoleOutput) - if failing: - for ftest in failing: - if not summary_mode: - logging.info(" Failed test: %s" % ftest) - all_failing[ftest] = all_failing.get(ftest,0)+1 - - return all_failing - -def main(): - global numRunsToExamine - global summary_mode - logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) - - # set up logger to write to stdout - soh = logging.StreamHandler(sys.stdout) - soh.setLevel(logging.INFO) - logger = logging.getLogger() - logger.removeHandler(logger.handlers[0]) - logger.addHandler(soh) - - opts = parse_args() - logging.info("****Recently FAILED builds in url: " + opts.jenkins_url - + "/job/" + opts.job_name + "") - - if opts.num_failed_tests != -1: - summary_mode = True - - all_failing = find_flaky_tests(opts.jenkins_url, opts.job_name, - opts.num_prev_days) - if len(all_failing) == 0: - raise SystemExit(0) - - if summary_mode and opts.num_failed_tests < len(all_failing): - logging.info("\nAmong " + str(numRunsToExamine) + - " runs examined, top " + str(opts.num_failed_tests) + - " failed tests <#failedRuns: testName>:") - else: - logging.info("\nAmong " + str(numRunsToExamine) + - " runs examined, all failed tests <#failedRuns: testName>:") - - # print summary section: all failed tests sorted by how many times they failed - line_count = 0 - for tn in sorted(all_failing, key=all_failing.get, reverse=True): - logging.info(" " + str(all_failing[tn])+ ": " + tn) - if summary_mode: - line_count += 1 - if line_count == opts.num_failed_tests: - break - - if summary_mode and error_count > 0: - logging.info("\n" + str(error_count) + " errors found, you may " - + "re-run in non summary mode to see error details."); - -if __name__ == "__main__": - main() diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index cf442902d3d9c..930d44a839c4f 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -72,10 +72,11 @@ RUN apt-get -q update \ openjdk-8-jdk \ pinentry-curses \ pkg-config \ - python \ - python2.7 \ - python-pkg-resources \ - python-setuptools \ + python3 \ + python3-pip \ + python3-pkg-resources \ + python3-setuptools \ + python3-wheel \ rsync \ shellcheck \ software-properties-common \ @@ -108,18 +109,6 @@ RUN mkdir -p /opt/boost-library \ && cd /root \ && rm -rf /opt/boost-library -#### -# Install pip (deprecated from Focal toolchain) -#### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/pip \ - && curl -L https://bootstrap.pypa.io/2.7/get-pip.py > get-pip.py \ - && mv get-pip.py /opt/pip \ - && cd /opt/pip \ - && python2.7 get-pip.py "pip < 21.0" \ - && cd /root \ - && rm -rf /opt/pip - ###### # Install Google Protobuf 3.7.1 (3.6.1 ships with Focal) ###### @@ -139,19 +128,9 @@ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" #### -# Install pylint at fixed version (2.0.0 removed python2 support) -# https://github.com/PyCQA/pylint/issues/2294 -#### -RUN pip2 install \ - astroid==1.6.6 \ - isort==4.3.21 \ - configparser==4.0.2 \ - pylint==1.9.2 - -#### -# Install dateutil.parser +# Install pylint and python-dateutil #### -RUN pip2 install python-dateutil==2.7.3 +RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 #### # Install bower diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index bdf9e0c7e59f6..858c9b36ddf05 100644 --- 
a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -76,10 +76,11 @@ RUN apt-get -q update \ openjdk-8-jdk \ pinentry-curses \ pkg-config \ - python \ - python2.7 \ - python-pkg-resources \ - python-setuptools \ + python3 \ + python3-pip \ + python3-pkg-resources \ + python3-setuptools \ + python3-wheel \ rsync \ shellcheck \ software-properties-common \ @@ -112,18 +113,6 @@ RUN mkdir -p /opt/boost-library \ && cd /root \ && rm -rf /opt/boost-library -#### -# Install pip (deprecated from Focal toolchain) -#### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/pip \ - && curl -L https://bootstrap.pypa.io/2.7/get-pip.py > get-pip.py \ - && mv get-pip.py /opt/pip \ - && cd /opt/pip \ - && python2.7 get-pip.py "pip < 21.0" \ - && cd /root \ - && rm -rf /opt/pip - ###### # Install Google Protobuf 3.7.1 (3.6.1 ships with Focal) ###### @@ -143,19 +132,9 @@ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" #### -# Install pylint at fixed version (2.0.0 removed python2 support) -# https://github.com/PyCQA/pylint/issues/2294 -#### -RUN pip2 install \ - astroid==1.6.6 \ - isort==4.3.21 \ - configparser==4.0.2 \ - pylint==1.9.2 - -#### -# Install dateutil.parser +# Install pylint and python-dateutil #### -RUN pip2 install python-dateutil==2.7.3 +RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 #### # Install bower From f07f43a17a74ce11d7ffdb230f91aa5b30a2a08a Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 20 Feb 2021 19:34:29 +0530 Subject: [PATCH 020/666] HDFS-15839. RBF: Cannot get method setBalancerBandwidth on Router Client. Contributed by Yang Yun. --- .../federation/router/RouterClientProtocol.java | 2 +- .../server/federation/router/TestRouterRpc.java | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 17524d421c17b..2a3bd735885e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1183,7 +1183,7 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.UNCHECKED); RemoteMethod method = new RemoteMethod("setBalancerBandwidth", - new Class[] {Long.class}, bandwidth); + new Class[] {long.class}, bandwidth); final Set nss = namenodeResolver.getNamespaces(); rpcClient.invokeConcurrent(nss, method, true, false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 8e5b761c919ca..a7c002566482d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -99,6 +99,7 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; +import 
org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; @@ -1934,4 +1935,16 @@ public void testMkdirsWithCallerContext() throws IOException { .contains("callerContext=clientContext,clientIp:")); assertTrue(verifyFileExists(routerFS, dirPath)); } + + @Test + public void testSetBalancerBandwidth() throws Exception { + long defaultBandwidth = + DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT; + long newBandwidth = defaultBandwidth * 2; + routerProtocol.setBalancerBandwidth(newBandwidth); + ArrayList datanodes = cluster.getCluster().getDataNodes(); + GenericTestUtils.waitFor(() -> { + return datanodes.get(0).getBalancerBandwidth() == newBandwidth; + }, 100, 60 * 1000); + } } From 0bcb6a44fe80fa4472615e0bcff06b1ab3833164 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 20 Feb 2021 19:48:00 +0530 Subject: [PATCH 021/666] HDFS-15806. DeadNodeDetector should close all the threads when it is closed. Contributed by Jinglun. --- .../org/apache/hadoop/hdfs/ClientContext.java | 8 +---- .../apache/hadoop/hdfs/DeadNodeDetector.java | 31 +++++++++++++++++++ .../hadoop/hdfs/TestDeadNodeDetection.java | 12 +++++++ 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java index 47e985b68e702..5bb7e03fc9d3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java @@ -321,13 +321,7 @@ synchronized void unreference() { Preconditions.checkState(counter > 0); counter--; if (counter == 0 && deadNodeDetectionEnabled && deadNodeDetector != null) { - deadNodeDetector.interrupt(); - try { - deadNodeDetector.join(); - } catch (InterruptedException e) { - LOG.warn("Encountered exception while waiting to join on dead " + - "node detector thread.", e); - } + deadNodeDetector.shutdown(); deadNodeDetector = null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java index 112bc0407f90f..e17f2612675b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java @@ -271,6 +271,37 @@ public void run() { } } + /** + * Shutdown all the threads. 
+ */ + public void shutdown() { + threadShutDown(this); + threadShutDown(probeDeadNodesSchedulerThr); + threadShutDown(probeSuspectNodesSchedulerThr); + probeDeadNodesThreadPool.shutdown(); + probeSuspectNodesThreadPool.shutdown(); + rpcThreadPool.shutdown(); + } + + private static void threadShutDown(Thread thread) { + if (thread != null && thread.isAlive()) { + thread.interrupt(); + try { + thread.join(); + } catch (InterruptedException e) { + } + } + } + + @VisibleForTesting + boolean isThreadsShutdown() { + return !this.isAlive() && !probeDeadNodesSchedulerThr.isAlive() + && !probeSuspectNodesSchedulerThr.isAlive() + && probeDeadNodesThreadPool.isShutdown() + && probeSuspectNodesThreadPool.isShutdown() + && rpcThreadPool.isShutdown(); + } + @VisibleForTesting static void setDisabledProbeThreadForTest( boolean disabledProbeThreadForTest) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index 9c52fcd8d1d26..9134f36bc95dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -357,6 +357,18 @@ public void testCloseDeadNodeDetector() throws Exception { dfs1.close(); } + @Test + public void testDeadNodeDetectorThreadsShutdown() throws Exception { + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem + .newInstance(new URI("hdfs://127.0.0.1:2001/"), conf); + DeadNodeDetector detector = dfs.getClient().getDeadNodeDetector(); + assertNotNull(detector); + dfs.close(); + assertTrue(detector.isThreadsShutdown()); + detector = dfs.getClient().getDeadNodeDetector(); + assertNull(detector); + } + private void createFile(FileSystem fs, Path filePath) throws IOException { FSDataOutputStream out = null; try { From 56d64b3d4027a67bded6c11e6648828614d1fb04 Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Mon, 22 Feb 2021 15:22:32 +0800 Subject: [PATCH 022/666] HDFS-15826. Solve the problem of incorrect progress of delegation tokens when loading FsImage. 
(#2686) Co-authored-by: zhujianghua --- .../token/delegation/DelegationTokenSecretManager.java | 3 ++- .../hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java | 3 +-- .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index b061d4bfa6b10..68f3dd6d67f12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -191,7 +191,7 @@ public SecretManagerState( } } - public synchronized void loadSecretManagerState(SecretManagerState state) + public synchronized void loadSecretManagerState(SecretManagerState state, Counter counter) throws IOException { Preconditions.checkState(!running, "Can't load state from image in a running SecretManager."); @@ -211,6 +211,7 @@ public synchronized void loadSecretManagerState(SecretManagerState state) id.setSequenceNumber(t.getSequenceNumber()); id.setMasterKeyId(t.getMasterKeyId()); addPersistedDelegationToken(id, t.getExpiryDate()); + counter.increment(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 7556727e6293a..183449f574ec9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -544,10 +544,9 @@ private void loadSecretManagerSection(InputStream in, StartupProgress prog, Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep); for (int i = 0; i < numTokens; ++i) { tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in)); - counter.increment(); } - fsn.loadSecretManagerState(s, keys, tokens); + fsn.loadSecretManagerState(s, keys, tokens, counter); } private void loadCacheManagerSection(InputStream in, StartupProgress prog, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index cf836356d0063..89689bd9b40a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6241,8 +6241,10 @@ void loadSecretManagerStateCompat(DataInput in) throws IOException { void loadSecretManagerState(SecretManagerSection s, List keys, - List tokens) throws IOException { - dtSecretManager.loadSecretManagerState(new SecretManagerState(s, keys, tokens)); + List tokens, + StartupProgress.Counter counter) throws IOException { + dtSecretManager.loadSecretManagerState(new SecretManagerState(s, keys, tokens), + counter); } /** From e380a9fa6546c48a4a1e8b36b30fdf54f680e29b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 22 Feb 2021 17:33:02 +0900 Subject: [PATCH 023/666] 
HADOOP-17534. Update Jackson to 2.10.5 and Jackson databind to 2.10.5.1 (#2708) Reviewed-by: Wei-Chiu Chuang --- hadoop-project/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c3a52d32af870..c26ceacc67308 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -71,8 +71,8 @@ 1.9.13 - 2.10.3 - 2.10.3 + 2.10.5 + 2.10.5.1 4.5.13 From c8f28f2ad2ab514176d5a4a1c8b05b08afbf2342 Mon Sep 17 00:00:00 2001 From: Wang Yuxuan <601377065@qq.com> Date: Mon, 22 Feb 2021 18:10:39 +0800 Subject: [PATCH 024/666] first commit (#2559) Co-authored-by: wangyuxuan --- .../apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 66cfa01a60f6d..8e331712d44be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -506,8 +506,7 @@ private void scan() { } // Block file and/or metadata file exists on the disk // Block exists in memory - if (info.getVolume().getStorageType() != StorageType.PROVIDED - && info.getBlockFile() == null) { + if (info.getBlockFile() == null) { // Block metadata file exits and block file is missing addDifference(diffRecord, statsRecord, info); } else if (info.getGenStamp() != memBlock.getGenerationStamp() From 173427d197142db8a3d935cc516d93f49b561a47 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 22 Feb 2021 18:13:06 +0800 Subject: [PATCH 025/666] Revert "first commit (#2559)" This reverts commit 1b94569d893ca54ba86ad67340e2e9db27252e0f. --- .../apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 8e331712d44be..66cfa01a60f6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -506,7 +506,8 @@ private void scan() { } // Block file and/or metadata file exists on the disk // Block exists in memory - if (info.getBlockFile() == null) { + if (info.getVolume().getStorageType() != StorageType.PROVIDED + && info.getBlockFile() == null) { // Block metadata file exits and block file is missing addDifference(diffRecord, statsRecord, info); } else if (info.getGenStamp() != memBlock.getGenerationStamp() From dca914969f2323b9033854b52e2bb86e7caa0c41 Mon Sep 17 00:00:00 2001 From: Wang Yuxuan <601377065@qq.com> Date: Mon, 22 Feb 2021 18:10:39 +0800 Subject: [PATCH 026/666] HDFS-15734. 
[READ] DirectoryScanner#scan need not check StorageType.PROVIDED (#2559) Co-authored-by: wangyuxuan --- .../apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 66cfa01a60f6d..8e331712d44be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -506,8 +506,7 @@ private void scan() { } // Block file and/or metadata file exists on the disk // Block exists in memory - if (info.getVolume().getStorageType() != StorageType.PROVIDED - && info.getBlockFile() == null) { + if (info.getBlockFile() == null) { // Block metadata file exits and block file is missing addDifference(diffRecord, statsRecord, info); } else if (info.getGenStamp() != memBlock.getGenerationStamp() From 7e50283a5253d97f02e0aed3bf461d6f3714c241 Mon Sep 17 00:00:00 2001 From: sumangala-patki <70206833+sumangala-patki@users.noreply.github.com> Date: Mon, 22 Feb 2021 17:15:58 +0530 Subject: [PATCH 027/666] HADOOP-17537. ABFS: Correct assertion reversed in HADOOP-13327 Contributed Sumangala Patki. --- .../hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index d8f0dc28dd5ee..ec33257060278 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -326,7 +326,8 @@ public void testStreamCapabilitiesWithFlushEnabled() throws Exception { try (FSDataOutputStream stream = getStreamAfterWrite(fs, testFilePath, buffer, true)) { assertHasStreamCapabilities(stream, StreamCapabilities.HFLUSH, - StreamCapabilities.HSYNC, + StreamCapabilities.HSYNC); + assertLacksStreamCapabilities(stream, StreamCapabilities.DROPBEHIND, StreamCapabilities.READAHEAD, StreamCapabilities.UNBUFFER); From 7827f7d8150e38668e01e7580361c08cedc06df0 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Mon, 22 Feb 2021 12:55:45 +0100 Subject: [PATCH 028/666] YARN-10513. CS Flexible Auto Queue Creation RM UIv2 modifications. Contributed by Andras Gyori. 
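Reviewer sketch (not part of the applied patch): the UIv2 change below keys its new queue labels off two computed properties added to the capacity-queue Ember model. This is a condensed restatement of the hunks that follow, assuming the REST payload fields (weight, creationMethod) named in the serializer change, with -1 as the "no weight configured" sentinel used by the patch:

    // capacity-queue model (Ember) -- restated from the diff below
    isWeightMode: function() {
      // weight stays -1 for percentage/absolute-capacity queues, so the
      // weight / normalized-weight labels only render for weight-mode queues
      return this.get("weight") !== -1;
    }.property("children"),

    isFlexibleDynamicQueue: function() {
      // "dynamicFlexible" is the creationMethod the serializer passes through,
      // presumably for queues auto-created by flexible auto queue creation
      return this.get("creationMethod") === "dynamicFlexible";
    }.property("children"),

The yarn-queue templates then switch between "configured capacity / max capacity" and "weight / normalized weight", and append a "flexible dynamic queue" tag, based on these two flags.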
--- .../yarn-queue-partition-capacity-labels.js | 14 +++++++- .../app/models/yarn-queue/capacity-queue.js | 13 +++++++ .../serializers/yarn-queue/capacity-queue.js | 5 +++ .../yarn-queue-partition-capacity-labels.hbs | 22 +++++++++++- .../main/webapp/app/templates/yarn-queue.hbs | 36 ++++++++++++++----- 5 files changed, 80 insertions(+), 10 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js index e7f9c03f35649..788155c38772c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js @@ -37,12 +37,24 @@ export default Ember.Component.extend({ const userLimit = queue.get("userLimit"); const userLimitFactor = queue.get("userLimitFactor"); const isLeafQueue = queue.get("isLeafQueue"); + const isWeightMode = queue.get("isWeightMode"); + const isFlexibleDynamicQueue = queue.get("isFlexibleDynamicQueue"); + const weight = queue.get("weight"); + const orderingPolicy = queue.get("orderingPolicy"); + const normalizedWeight = queue.get("normalizedWeight"); + const creationMethod = queue.get("creationMethod"); return { ...partitionMap[filteredParition], userLimit, userLimitFactor, - isLeafQueue + isLeafQueue, + isWeightMode, + weight, + normalizedWeight, + orderingPolicy, + creationMethod, + isFlexibleDynamicQueue }; } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js index ebcccb9eec3d6..885c6a521541c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js @@ -21,6 +21,7 @@ import Converter from 'yarn-ui/utils/converter'; export default DS.Model.extend({ name: DS.attr("string"), + path: DS.attr("string"), children: DS.attr("array"), parent: DS.attr("string"), capacity: DS.attr("number"), @@ -31,6 +32,10 @@ export default DS.Model.extend({ absCapacity: DS.attr("number"), absMaxCapacity: DS.attr("number"), absUsedCapacity: DS.attr("number"), + weight: DS.attr("number"), + normalizedWeight: DS.attr("number"), + creationMethod: DS.attr("string"), + orderingPolicy: DS.attr("string"), state: DS.attr("string"), userLimit: DS.attr("number"), userLimitFactor: DS.attr("number"), @@ -49,6 +54,14 @@ export default DS.Model.extend({ return len <= 0; }.property("children"), + isWeightMode: function() { + return this.get("weight") !== -1; + }.property("children"), + + isFlexibleDynamicQueue: function() { + return this.get("creationMethod") === "dynamicFlexible"; + }.property("children"), + capacitiesBarChartData: function() { var floatToFixed = Converter.floatToFixed; return [ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js index 2dec5205fdd51..e9ce25d2cb836 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js @@ -92,6 +92,7 @@ export default DS.JSONAPISerializer.extend({ type: primaryModelClass.modelName, // yarn-queue attributes: { name: payload.queueName, + path: payload.queuePath, parent: payload.myParent, children: children, capacity: payload.capacity, @@ -100,7 +101,11 @@ export default DS.JSONAPISerializer.extend({ absCapacity: payload.absoluteCapacity, absMaxCapacity: payload.absoluteMaxCapacity, absUsedCapacity: payload.absoluteUsedCapacity, + weight: payload.weight, + normalizedWeight: payload.normalizedWeight, + creationMethod: payload.creationMethod, state: payload.state, + orderingPolicy: payload.orderingPolicyInfo, userLimit: payload.userLimit, userLimitFactor: payload.userLimitFactor, preemptionDisabled: payload.preemptionDisabled, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs index fdecb2de310bf..a84538da8d28f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs @@ -30,6 +30,18 @@ {{data.absoluteMaxCapacity}}% +{{#if data.isWeightMode}} +
+ + weight + {{data.weight}} + + + normalized weight + {{data.normalizedWeight}} + +
+{{else}}
configured capacity @@ -40,6 +52,7 @@ {{data.maxCapacity}}%
+{{/if}} {{#if data.isLeafQueue}}
@@ -51,4 +64,11 @@ {{data.userLimitFactor}}
-{{/if}} \ No newline at end of file +{{/if}} +{{#if data.isFlexibleDynamicQueue}} +
+ + flexible dynamic queue + +
+{{/if}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs index 8024ffa0c7363..b65a3be69e445 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs @@ -31,14 +31,34 @@ {{/if}} {{#if (eq model.queues.firstObject.type "capacity")}}
- - configured capacity - {{model.selectedQueue.capacity}}% - - - configured max capacity - {{model.selectedQueue.maxCapacity}}% - + {{#if model.selectedQueue.isWeightMode}} + + weight + {{model.selectedQueue.weight}} + + + normalized weight + {{model.selectedQueue.normalizedWeight}} + + {{else}} + + configured capacity + {{model.selectedQueue.capacity}}% + + + configured max capacity + {{model.selectedQueue.maxCapacity}}% + + {{/if}} + + ordering policy + {{model.selectedQueue.orderingPolicy}} + + {{#if model.selectedQueue.isFlexibleDynamicQueue}} + + flexible dynamic queue + + {{/if}} {{#if model.selectedQueue.isLeafQueue}} user limit From 0145be6eb579639bd9a3ef92fe686cdee395ef38 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 22 Feb 2021 23:15:04 +0900 Subject: [PATCH 029/666] MAPREDUCE-7323. Remove job_history_summary.py. (#2712) Reviewed-by: Takanobu Asanuma --- .../examples/terasort/job_history_summary.py | 100 ------------------ 1 file changed, 100 deletions(-) delete mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py deleted file mode 100644 index 70725f833ea94..0000000000000 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re -import sys - -pat = re.compile('(?P[^=]+)="(?P[^"]*)" *') -counterPat = re.compile('(?P[^:]+):(?P[^,]*),?') - -def parse(tail): - result = {} - for n,v in re.findall(pat, tail): - result[n] = v - return result - -mapStartTime = {} -mapEndTime = {} -reduceStartTime = {} -reduceShuffleTime = {} -reduceSortTime = {} -reduceEndTime = {} -reduceBytes = {} - -for line in sys.stdin: - words = line.split(" ",1) - event = words[0] - attrs = parse(words[1]) - if event == 'MapAttempt': - if attrs.has_key("START_TIME"): - mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000 - elif attrs.has_key("FINISH_TIME"): - mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000 - elif event == 'ReduceAttempt': - if attrs.has_key("START_TIME"): - reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000 - elif attrs.has_key("FINISH_TIME"): - reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000 - reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000 - reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000 - elif event == 'Task': - if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"): - for n,v in re.findall(counterPat, attrs["COUNTERS"]): - if n == "File Systems.HDFS bytes written": - reduceBytes[attrs["TASKID"]] = int(v) - -runningMaps = {} -shufflingReduces = {} -sortingReduces = {} -runningReduces = {} -startTime = min(reduce(min, mapStartTime.values()), - reduce(min, reduceStartTime.values())) -endTime = max(reduce(max, mapEndTime.values()), - reduce(max, reduceEndTime.values())) - -reduces = reduceBytes.keys() -reduces.sort() - -print "Name reduce-output-bytes shuffle-finish reduce-finish" -for r in reduces: - print r, reduceBytes[r], reduceShuffleTime[r] - startTime, - print reduceEndTime[r] - startTime - -print - -for t in range(startTime, endTime): - runningMaps[t] = 0 - shufflingReduces[t] = 0 - sortingReduces[t] = 0 - runningReduces[t] = 0 - -for map in mapStartTime.keys(): - for t in range(mapStartTime[map], mapEndTime[map]): - runningMaps[t] += 1 -for reduce in reduceStartTime.keys(): - for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]): - shufflingReduces[t] += 1 - for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]): - sortingReduces[t] += 1 - for t in range(reduceSortTime[reduce], reduceEndTime[reduce]): - runningReduces[t] += 1 - -print "time maps shuffle merge reduce" -for t in range(startTime, endTime): - print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t], - print runningReduces[t] From 24be512b04af792b45b0e811c88192418ec6b2f4 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Mon, 22 Feb 2021 18:48:14 +0000 Subject: [PATCH 030/666] YARN-10647. Fix TestRMNodeLabelsManager failed after YARN-10501. Contributed by Qi Zhu. 
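Reviewer sketch (not part of the applied patch): the one-line guard below protects the per-node replaceLabelsForNode() call added earlier in this series, which TestRMNodeLabelsManager evidently hit with null label sets after YARN-10501. Restating the REPLACE branch as it reads once the hunk below is applied:

    // internalUpdateLabelsOnNodes(), REPLACE case -- restated from the hunk below
    for (Node node : host.nms.values()) {
      replaceNodeForLabels(node.nodeId, node.labels, labels);
      if (node.labels != null && labels != null) {
        // only rewrite the label->node mapping when both the old and the
        // new label sets actually exist
        replaceLabelsForNode(node.nodeId, node.labels, labels);
      }
      node.labels = null;  // pre-existing behavior: per-node labels are cleared
    }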
--- .../hadoop/yarn/nodelabels/CommonNodeLabelsManager.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index 8fd0315fa8577..5b9382fcff2e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -646,7 +646,9 @@ protected void internalUpdateLabelsOnNodes( host.labels.addAll(labels); for (Node node : host.nms.values()) { replaceNodeForLabels(node.nodeId, node.labels, labels); - replaceLabelsForNode(node.nodeId, node.labels, labels); + if (node.labels != null && labels != null) { + replaceLabelsForNode(node.nodeId, node.labels, labels); + } node.labels = null; } break; From f7defe836aa10bd803818dd4d4f936b69fbac0cb Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Tue, 23 Feb 2021 15:12:06 +0900 Subject: [PATCH 031/666] HDFS-15845. RBF: Router fails to start due to NoClassDefFoundError for hadoop-federation-balance. (#2714) --- hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index fa933540735ca..7d3a047a31357 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -112,6 +112,7 @@ function hdfscmd_case HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin ;; dfsrouter) + hadoop_add_to_classpath_tools hadoop-federation-balance HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.DFSRouter' ;; From 9d9e982477d1f3c1fc8a36706509b71b3f335072 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 23 Feb 2021 19:08:38 +0530 Subject: [PATCH 032/666] HDFS-15847. create client protocol: add ecPolicyName & storagePolicy param to debug statement string. Contributed by Bhavik Patel. --- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 89689bd9b40a8..e98a59d7283e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2664,7 +2664,9 @@ private HdfsFileStatus startFileInt(String src, .append(", createFlag=").append(flag) .append(", blockSize=").append(blockSize) .append(", supportedVersions=") - .append(Arrays.toString(supportedVersions)); + .append(Arrays.toString(supportedVersions)) + .append(", ecPolicyName=").append(ecPolicyName) + .append(", storagePolicy=").append(storagePolicy); NameNode.stateChangeLog.debug(builder.toString()); } if (!DFSUtil.isValidName(src) || From 6c62796388fb6636b271410d17013c63caf92f16 Mon Sep 17 00:00:00 2001 From: Mike Date: Tue, 23 Feb 2021 20:03:27 +0300 Subject: [PATCH 033/666] HADOOP-17528. 
SFTP File System: close the connection pool when closing a FileSystem (#2701) Contributed by Mike Pryakhin. --- .../apache/hadoop/fs/sftp/SFTPFileSystem.java | 31 ++++++++++++++++++- .../hadoop/fs/sftp/TestSFTPFileSystem.java | 11 +++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java index 898f615ec4633..297ec048e50c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java @@ -24,6 +24,7 @@ import java.net.URLDecoder; import java.util.ArrayList; import java.util.Vector; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -50,6 +51,7 @@ public class SFTPFileSystem extends FileSystem { private SFTPConnectionPool connectionPool; private URI uri; + private final AtomicBoolean closed = new AtomicBoolean(false); private static final int DEFAULT_SFTP_PORT = 22; private static final int DEFAULT_MAX_CONNECTION = 5; @@ -83,6 +85,7 @@ public class SFTPFileSystem extends FileSystem { "Destination path %s already exist, cannot rename!"; public static final String E_FAILED_GETHOME = "Failed to get home directory"; public static final String E_FAILED_DISCONNECT = "Failed to disconnect"; + public static final String E_FS_CLOSED = "FileSystem is closed!"; /** * Set configuration from UI. @@ -138,8 +141,9 @@ private void setConfigurationFromURI(URI uriInfo, Configuration conf) * @throws IOException */ private ChannelSftp connect() throws IOException { - Configuration conf = getConf(); + checkNotClosed(); + Configuration conf = getConf(); String host = conf.get(FS_SFTP_HOST, null); int port = conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT); String user = conf.get(FS_SFTP_USER_PREFIX + host, null); @@ -703,6 +707,31 @@ public FileStatus getFileStatus(Path f) throws IOException { } } + @Override + public void close() throws IOException { + if (closed.getAndSet(true)) { + return; + } + try { + super.close(); + } finally { + if (connectionPool != null) { + connectionPool.shutdown(); + } + } + } + + /** + * Verify that the input stream is open. Non blocking; this gives + * the last state of the volatile {@link #closed} field. + * @throws IOException if the connection is closed. 
+ */ + private void checkNotClosed() throws IOException { + if (closed.get()) { + throw new IOException(uri + ": " + E_FS_CLOSED); + } + } + @VisibleForTesting SFTPConnectionPool getConnectionPool() { return connectionPool; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java index 693926242c95d..58452f86f5999 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java @@ -374,4 +374,15 @@ public void testMkDirs() throws IOException { assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), is(1)); } + + @Test + public void testCloseFileSystemClosesConnectionPool() throws Exception { + SFTPFileSystem fs = (SFTPFileSystem) sftpFs; + fs.getHomeDirectory(); + assertThat(fs.getConnectionPool().getLiveConnCount(), is(1)); + fs.close(); + assertThat(fs.getConnectionPool().getLiveConnCount(), is(0)); + ///making sure that re-entrant close calls are safe + fs.close(); + } } From 3672c76c1a8f4dd320ef91a84ed48c14f20f8cc8 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Wed, 24 Feb 2021 07:46:02 +0900 Subject: [PATCH 034/666] HADOOP-17538. Add kms-default.xml and httpfs-default.xml to site index. (#2716) --- hadoop-project/src/site/site.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 50f75873bf878..a150385048f2f 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -216,6 +216,8 @@ + + From 28b879819b9825db2cb009a9637cf8b874154e5d Mon Sep 17 00:00:00 2001 From: LeonGao Date: Tue, 23 Feb 2021 22:29:48 -0800 Subject: [PATCH 035/666] HDFS-15781. Add metrics for how blocks are moved in replaceBlock. 
(#2704) --- .../hdfs/server/datanode/DataXceiver.java | 2 + .../fsdataset/impl/FsDatasetImpl.java | 4 + .../datanode/metrics/DataNodeMetrics.java | 22 ++++++ .../hadoop/hdfs/server/mover/TestMover.java | 75 ++++++++++++++++--- 4 files changed, 91 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index d3713617a6321..278ee098362a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -1250,6 +1250,8 @@ public void replaceBlock(final ExtendedBlock block, LOG.info("Moved {} from {}, delHint={}", block, peer.getRemoteAddressString(), delHint); + + datanode.metrics.incrReplaceBlockOpToOtherHost(); } } catch (IOException ioe) { opStatus = ERROR; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index f8a97d7e21c43..edf2edcb1b800 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -1121,6 +1121,10 @@ public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, } try { moveBlock(block, replicaInfo, volumeRef, useVolumeOnSameMount); + datanode.getMetrics().incrReplaceBlockOpOnSameHost(); + if (useVolumeOnSameMount) { + datanode.getMetrics().incrReplaceBlockOpOnSameMount(); + } } finally { if (volumeRef != null) { volumeRef.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index 9350d95fcddc3..712d8f46ef7b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -188,6 +188,15 @@ public class DataNodeMetrics { @Metric MutableCounterLong packetsSlowWriteToDisk; @Metric MutableCounterLong packetsSlowWriteToOsCache; + @Metric("Number of replaceBlock ops between" + + " storage types on same host with local copy") + private MutableCounterLong replaceBlockOpOnSameHost; + @Metric("Number of replaceBlock ops between" + + " storage types on same disk mount with same disk tiering feature") + private MutableCounterLong replaceBlockOpOnSameMount; + @Metric("Number of replaceBlock ops to another node") + private MutableCounterLong replaceBlockOpToOtherHost; + final MetricsRegistry registry = new MetricsRegistry("datanode"); @Metric("Milliseconds spent on calling NN rpc") private MutableRatesWithAggregation @@ -711,4 +720,17 @@ public void incrPacketsSlowWriteToDisk() { public void incrPacketsSlowWriteToOsCache() { packetsSlowWriteToOsCache.incr(); } + + public void incrReplaceBlockOpOnSameMount() { + replaceBlockOpOnSameMount.incr(); + } + + public void incrReplaceBlockOpOnSameHost() { + replaceBlockOpOnSameHost.incr(); + } + + public void 
incrReplaceBlockOpToOtherHost() { + replaceBlockOpToOtherHost.incr(); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 481c7cf33a8e6..f428b2c2a8fc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -34,6 +34,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import java.io.File; import java.io.IOException; @@ -83,6 +85,7 @@ import org.apache.hadoop.hdfs.server.mover.Mover.MLocation; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -170,14 +173,26 @@ public void testScheduleSameBlock() throws IOException { } } - private void testWithinSameNode(Configuration conf) throws Exception { - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(3) - .storageTypes( - new StorageType[] {StorageType.DISK, StorageType.ARCHIVE}) - .build(); + private void testMovementWithLocalityOption(Configuration conf, + boolean sameNode) throws Exception { + final MiniDFSCluster cluster; + if (sameNode) { + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(3) + .storageTypes( + new StorageType[] {StorageType.DISK, StorageType.ARCHIVE}) + .build(); + } else { + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(2) + .storageTypes( + new StorageType[][] {{StorageType.DISK}, {StorageType.ARCHIVE}}) + .build(); + } + try { cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); final String file = "/testScheduleWithinSameNode/file"; Path dir = new Path("/testScheduleWithinSameNode"); @@ -201,12 +216,37 @@ private void testWithinSameNode(Configuration conf) throws Exception { Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc); // Wait till namenode notified about the block location details - waitForLocatedBlockWithArchiveStorageType(dfs, file, 3); + waitForLocatedBlockWithArchiveStorageType(dfs, file, sameNode ? 
3 : 1); + + MetricsRecordBuilder rb = + getMetrics(cluster.getDataNodes().get(1).getMetrics().name()); + + if (!sameNode) { + testReplaceBlockOpLocalityMetrics(0, 0, 1, rb); + } else if (conf.getBoolean( + DFSConfigKeys.DFS_DATANODE_ALLOW_SAME_DISK_TIERING, false)) { + testReplaceBlockOpLocalityMetrics(1, 1, 0, rb); + } else { + testReplaceBlockOpLocalityMetrics(1, 0, 0, rb); + } } finally { cluster.shutdown(); } } + private void testReplaceBlockOpLocalityMetrics( + long sameHost, + long sameMount, + long otherHost, + MetricsRecordBuilder rb) { + assertCounter("ReplaceBlockOpOnSameHost", + sameHost, rb); + assertCounter("ReplaceBlockOpOnSameMount", + sameMount, rb); + assertCounter("ReplaceBlockOpToOtherHost", + otherHost, rb); + } + private void setupStoragePoliciesAndPaths(DistributedFileSystem dfs1, DistributedFileSystem dfs2, Path dir, String file) @@ -441,17 +481,27 @@ public Boolean get() { }, 100, 3000); } + /** + * Test block movement with different block locality scenarios. + * 1) Block will be copied to local host, + * if there is target storage type on same datanode. + * 2) Block will be moved within local mount with hardlink, + * if disk/archive are on same mount with same-disk-tiering feature on. + * 3) Block will be moved to another datanode, + * if there is no available target storage type on local datanode. + */ @Test - public void testScheduleBlockWithinSameNode() throws Exception { + public void testScheduleBlockLocality() throws Exception { final Configuration conf = new HdfsConfiguration(); initConf(conf); - testWithinSameNode(conf); + testMovementWithLocalityOption(conf, true); // Test movement with hardlink, when same disk tiering is enabled. conf.setBoolean(DFSConfigKeys.DFS_DATANODE_ALLOW_SAME_DISK_TIERING, true); conf.setDouble(DFSConfigKeys .DFS_DATANODE_RESERVE_FOR_ARCHIVE_DEFAULT_PERCENTAGE, 0.5); - testWithinSameNode(conf); + testMovementWithLocalityOption(conf, true); conf.setBoolean(DFSConfigKeys.DFS_DATANODE_ALLOW_SAME_DISK_TIERING, false); + testMovementWithLocalityOption(conf, false); } private void checkMovePaths(List actual, Path... expected) { @@ -1006,7 +1056,8 @@ private void initSecureConf(Configuration conf) throws Exception { /** * Test Mover runs fine when logging in with a keytab in kerberized env. - * Reusing testWithinSameNode here for basic functionality testing. + * Reusing testMovementWithLocalityOption + * here for basic functionality testing. */ @Test(timeout = 300000) public void testMoverWithKeytabs() throws Exception { @@ -1020,7 +1071,7 @@ public void testMoverWithKeytabs() throws Exception { @Override public Void run() throws Exception { // verify that mover runs Ok. - testWithinSameNode(conf); + testMovementWithLocalityOption(conf, true); // verify that UGI was logged in using keytab. Assert.assertTrue(UserGroupInformation.isLoginKeytabBased()); return null; From 013f9031bf6d4e16e0d10daa4c4fffd755272f84 Mon Sep 17 00:00:00 2001 From: Renukaprasad C <48682981+prasad-acit@users.noreply.github.com> Date: Wed, 24 Feb 2021 14:59:25 +0530 Subject: [PATCH 036/666] HADOOP-17510. Hadoop prints sensitive Cookie information. 
(#2673) --- .../hadoop/security/authentication/client/AuthenticatedURL.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java index 488400647cf06..32f4edfbc5710 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java @@ -153,7 +153,6 @@ private synchronized void setAuthCookie(HttpCookie cookie) { cookieHeaders = new HashMap<>(); cookieHeaders.put("Cookie", Arrays.asList(cookie.toString())); } - LOG.trace("Setting token value to {} ({})", authCookie, oldCookie); } private void setAuthCookieValue(String value) { From 7ac3cbcda4e0e2ef0b8c445bfd78a8008122886d Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Wed, 24 Feb 2021 10:29:36 +0000 Subject: [PATCH 037/666] HDFS-15422. Reported IBR is partially replaced with stored info when queuing. Contributed by Stephen O'Donnell and Kihwal Lee --- .../server/blockmanagement/BlockManager.java | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index d612fff53fa23..fdf300f913dd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -3211,10 +3211,11 @@ private void reportDiffSortedInner( // If the block is an out-of-date generation stamp or state, // but we're the standby, we shouldn't treat it as corrupt, // but instead just queue it for later processing. - // TODO: Pretty confident this should be s/storedBlock/block below, - // since we should be postponing the info of the reported block, not - // the stored block. See HDFS-6289 for more context. - queueReportedBlock(storageInfo, storedBlock, reportedState, + // Storing the reported block for later processing, as that is what + // comes from the IBR / FBR and hence what we should use to compare + // against the memory state. + // See HDFS-6289 and HDFS-15422 for more context. + queueReportedBlock(storageInfo, replica, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { toCorrupt.add(c); @@ -4276,10 +4277,11 @@ private boolean processAndHandleReportedBlock( // If the block is an out-of-date generation stamp or state, // but we're the standby, we shouldn't treat it as corrupt, // but instead just queue it for later processing. - // TODO: Pretty confident this should be s/storedBlock/block below, - // since we should be postponing the info of the reported block, not - // the stored block. See HDFS-6289 for more context. - queueReportedBlock(storageInfo, storedBlock, reportedState, + // Storing the reported block for later processing, as that is what + // comes from the IBR / FBR and hence what we should use to compare + // against the memory state. + // See HDFS-6289 and HDFS-15422 for more context. 
+ queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { markBlockAsCorrupt(c, storageInfo, node); From 2a3626ab81e5968302f8e5b8ea7824cfeb5bc147 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Wed, 24 Feb 2021 20:41:35 +0100 Subject: [PATCH 038/666] YARN-10609. Update the document for YARN-10531(Be able to disable user limit factor for CapacityScheduler Leaf Queue). Contributed by Qi Zhu. --- .../hadoop-yarn-site/src/site/markdown/CapacityScheduler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index e5b23a7c1a3ea..8acf7230302ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -126,7 +126,7 @@ Configuration | `yarn.scheduler.capacity..capacity` | Queue *capacity* in percentage (%) as a float (e.g. 12.5) OR as absolute resource queue minimum capacity. The sum of capacities for all queues, at each level, must be equal to 100. However if absolute resource is configured, sum of absolute resources of child queues could be less than it's parent absolute resource capacity. Applications in the queue may consume more resources than the queue's capacity if there are free resources, providing elasticity. | | `yarn.scheduler.capacity..maximum-capacity` | Maximum queue capacity in percentage (%) as a float OR as absolute resource queue maximum capacity. This limits the *elasticity* for applications in the queue. 1) Value is between 0 and 100. 2) Admin needs to make sure absolute maximum capacity >= absolute capacity for each queue. Also, setting this value to -1 sets maximum capacity to 100%. | | `yarn.scheduler.capacity..minimum-user-limit-percent` | Each queue enforces a limit on the percentage of resources allocated to a user at any given time, if there is demand for resources. The user limit can vary between a minimum and maximum value. The former (the minimum value) is set to this property value and the latter (the maximum value) depends on the number of users who have submitted applications. For e.g., suppose the value of this property is 25. If two users have submitted applications to a queue, no single user can use more than 50% of the queue resources. If a third user submits an application, no single user can use more than 33% of the queue resources. With 4 or more users, no user can use more than 25% of the queues resources. A value of 100 implies no user limits are imposed. The default is 100. Value is specified as a integer. | -| `yarn.scheduler.capacity..user-limit-factor` | The multiple of the queue capacity which can be configured to allow a single user to acquire more resources. By default this is set to 1 which ensures that a single user can never take more than the queue's configured capacity irrespective of how idle the cluster is. Value is specified as a float. | +| `yarn.scheduler.capacity..user-limit-factor` | User limit factor provides a way to control the max amount of resources that a single user can consume. It is the multiple of the queue's capacity. By default this is set to 1 which ensures that a single user can never take more than the queue's configured capacity irrespective of how idle the cluster is. 
Increasing it means a single user can use more than the minimum capacity of the cluster, while decreasing it results in lower maximum resources. Setting this to -1 will disable the feature. Value is specified as a float. Note: using the flexible auto queue creation (yarn.scheduler.capacity..auto-queue-creation-v2) with weights will automatically set this property to -1, as the dynamic queues will be created with the hardcoded weight of 1 and in idle cluster scenarios they should be able to use more resources than calculated. | | `yarn.scheduler.capacity..maximum-allocation-mb` | The per queue maximum limit of memory to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-mb`. This value must be smaller than or equal to the cluster maximum. | | `yarn.scheduler.capacity..maximum-allocation-vcores` | The per queue maximum limit of virtual cores to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-vcores`. This value must be smaller than or equal to the cluster maximum. | | `yarn.scheduler.capacity..user-settings..weight` | This floating point value is used when calculating the user limit resource values for users in a queue. This value will weight each user more or less than the other users in the queue. For example, if user A should receive 50% more resources in a queue than users B and C, this property will be set to 1.5 for user A. Users B and C will default to 1.0. | From a8d71be7875c0e23027393baa682b442270a6dfd Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Thu, 25 Feb 2021 16:46:19 +0000 Subject: [PATCH 039/666] [YARN-10613] Config to allow Intra- and Inter-queue preemption to enable/disable conservativeDRF. 
Contributed by Eric Payne --- .../CapacitySchedulerPreemptionContext.java | 4 + .../capacity/FifoCandidatesSelector.java | 13 +++- .../IntraQueueCandidatesSelector.java | 3 +- .../ProportionalCapacityPreemptionPolicy.java | 34 +++++++- .../CapacitySchedulerConfiguration.java | 15 ++++ ...cityPreemptionPolicyInterQueueWithDRF.java | 72 +++++++++++++++++ ...cityPreemptionPolicyIntraQueueWithDRF.java | 78 +++++++++++++++++++ 7 files changed, 212 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java index 7985296fcafa2..7f4fd91be4398 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java @@ -74,4 +74,8 @@ TempQueuePerPartition getQueueByPartition(String queueName, @Unstable IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy(); + + boolean getCrossQueuePreemptionConservativeDRF(); + + boolean getInQueuePreemptionConservativeDRF(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java index 28a74498af9a4..d9e9091bc86f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java @@ -116,7 +116,9 @@ public Map> selectCandidates( .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedCandidates, curCandidates, - totalPreemptionAllowed, false); + totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF() + ); if (!preempted) { continue; } @@ -193,7 +195,8 @@ private void preemptAMContainers(Resource clusterResource, boolean preempted = CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, preemptMap, - curCandidates, totalPreemptionAllowed, false); + curCandidates, totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF()); if (preempted) { Resources.subtractFrom(skippedAMSize, c.getAllocatedResource()); } @@ -229,7 +232,8 @@ private void preemptFrom(FiCaSchedulerApp app, CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedContainers, - curCandidates, 
totalPreemptionAllowed, false); + curCandidates, totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF()); if (!preemptionContext.isObserveOnly()) { preemptionContext.getRMContext().getDispatcher().getEventHandler() @@ -273,7 +277,8 @@ private void preemptFrom(FiCaSchedulerApp app, CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedContainers, - curCandidates, totalPreemptionAllowed, false); + curCandidates, totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java index 8a1b47b5dee3b..cea1bca773690 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java @@ -263,7 +263,8 @@ private void preemptFromLeastStarvedApp(LeafQueue leafQueue, boolean ret = CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedCandidates, - curCandidates, totalPreemptedResourceAllowed, true); + curCandidates, totalPreemptedResourceAllowed, + preemptionContext.getInQueuePreemptionConservativeDRF()); // Subtract from respective user's resource usage once a container is // selected for preemption. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index b343678fa6f1d..b6e7c65cce0d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -113,6 +113,9 @@ public enum IntraQueuePreemptionOrderPolicy { private float minimumThresholdForIntraQueuePreemption; private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy; + private boolean crossQueuePreemptionConservativeDRF; + private boolean inQueuePreemptionConservativeDRF; + // Current configuration private CapacitySchedulerConfiguration csConfig; @@ -225,6 +228,18 @@ private void updateConfigIfNeeded() { CapacitySchedulerConfiguration.DEFAULT_INTRAQUEUE_PREEMPTION_ORDER_POLICY) .toUpperCase()); + crossQueuePreemptionConservativeDRF = config.getBoolean( + CapacitySchedulerConfiguration. 
+ CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + CapacitySchedulerConfiguration. + DEFAULT_CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF); + + inQueuePreemptionConservativeDRF = config.getBoolean( + CapacitySchedulerConfiguration. + IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + CapacitySchedulerConfiguration. + DEFAULT_IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF); + candidatesSelectionPolicies = new ArrayList<>(); // Do we need white queue-priority preemption policy? @@ -300,7 +315,12 @@ private void updateConfigIfNeeded() { selectCandidatesForResevedContainers + "\n" + "additional_res_balance_based_on_reserved_containers = " + additionalPreemptionBasedOnReservedResource + "\n" + - "Preemption-to-balance-queue-enabled = " + isPreemptionToBalanceRequired); + "Preemption-to-balance-queue-enabled = " + + isPreemptionToBalanceRequired + "\n" + + "cross-queue-preemption.conservative-drf = " + + crossQueuePreemptionConservativeDRF + "\n" + + "in-queue-preemption.conservative-drf = " + + inQueuePreemptionConservativeDRF); csConfig = config; } @@ -425,7 +445,7 @@ private Set getLeafQueueNames(TempQueuePerPartition q) { return leafQueueNames; } - + /** * This method selects and tracks containers to be preemptionCandidates. If a container * is in the target list for more than maxWaitTime it is killed. @@ -784,6 +804,16 @@ public IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy() { return intraQueuePreemptionOrderPolicy; } + @Override + public boolean getCrossQueuePreemptionConservativeDRF() { + return crossQueuePreemptionConservativeDRF; + } + + @Override + public boolean getInQueuePreemptionConservativeDRF() { + return inQueuePreemptionConservativeDRF; + } + @Override public long getDefaultMaximumKillWaitTimeout() { return maxWaitTime; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 08b38a1707e60..8e605964e4819 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -1742,6 +1742,21 @@ public void setAllowZeroCapacitySum(String queue, boolean value) { + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX + "preemption-order-policy"; public static final String DEFAULT_INTRAQUEUE_PREEMPTION_ORDER_POLICY = "userlimit_first"; + /** + * Flag to determine whether or not to preempt containers from apps where some + * used resources are less than the user's user limit. 
+ */ + public static final String CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + PREEMPTION_CONFIG_PREFIX + "conservative-drf"; + public static final Boolean DEFAULT_CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + false; + + public static final String IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + PREEMPTION_CONFIG_PREFIX + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX + + "conservative-drf"; + public static final Boolean DEFAULT_IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + true; + /** * Should we allow queues continue grow after all queue reaches their * guaranteed capacity. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java index c42867a0a0041..27208020185f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java @@ -19,9 +19,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Before; @@ -29,8 +31,10 @@ import java.io.IOException; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -174,4 +178,72 @@ public void test3ResourceTypesInterQueuePreemption() throws IOException { new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } + + @SuppressWarnings("unchecked") + @Test + public void testInterQueuePreemptionWithStrictAndRelaxedDRF() + throws IOException { + + /* + * root + * / \ \ + * a b c + * + * A / B / C have 33.3 / 33.3 / 33.4 resources + * Total cluster resource have mem=61440, cpu=600 + * + * +=================+========================+ + * | used in queue a | user limit for queue a | + * +=================+========================+ + * | 61440:60 | 20480:200 | + * +=================+========================+ + * In this case, the used memory is over the user limit but the used vCores + * is not. If conservative DRF is true, preemptions will not occur. + * If conservative DRF is false (default) preemptions will occur. 
+ */ + String labelsConfig = "=61440:600,true;"; + String nodesConfig = "n1= res=61440:600"; // n1 is default partition + String queuesConfig = + // guaranteed,max,used,pending,reserved + "root(=[61440:600 61440:600 61440:600 20480:20 0]);" + // root + "-a(=[20480:200 61440:600 61440:60 0:0 0]);" + // b + "-b(=[20480:200 61440:600 0:0 20480:20 0]);" + // a + "-c(=[20480:200 61440:600 0:0 0:0 0])"; // c + String appsConfig = + //queueName\t(priority,resource,host,expression,#repeat,reserved) + "a\t" + "(1,1024:1,n1,,60,false,0:0,user1);" + // app1 in a + "b\t" + "(1,0:0,n1,,0,false,20480:20,user2);"; // app2 in b + + conf.setBoolean( + CapacitySchedulerConfiguration.CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + true); + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + Resource ul = Resource.newInstance(20480, 20); + when(((LeafQueue)(cs.getQueue("root.a"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(0)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + + reset(eventHandler); + + conf.setBoolean( + CapacitySchedulerConfiguration.CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + false); + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + ul = Resource.newInstance(20480, 20); + when(((LeafQueue)(cs.getQueue("root.a"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(20)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java index 940d11ceda990..ee88d3161b2e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java @@ -18,16 +18,20 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.junit.Before; import org.junit.Test; import java.io.IOException; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import static 
org.mockito.Mockito.when; @@ -111,6 +115,80 @@ public void testSimpleIntraQueuePreemptionWithVCoreResource() getAppAttemptId(3)))); } + @SuppressWarnings("unchecked") + @Test + public void testIntraQueuePreemptionFairOrderingWithStrictAndRelaxedDRF() + throws IOException { + /** + * Continue to allow intra-queue preemption when only one of the user's + * resources is above the user limit. + * Queue structure is: + * + *
+     *       root
+     *     /  |
+     *    a   b
+     * 
+ * + * Guaranteed resource of a and b are 30720:300 and 30720:300 Total cluster + * resource = 61440:600. + * Scenario: Queue B has one running app using 61720:60 resources with no + * pending resources, and one app with no used resources and 30720:30 + * pending resources. + * + * The first part of the test is to show what happens when the conservative + * DRF property is set. Since the memory is above and the vcores is below + * the user limit, only the minimum number of containers is allowed. + * In the second part, since conservative DRF is relaxed, all containers + * needed are allowed to be preempted (minus the AM size). + */ + + conf.set(CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ORDER_POLICY, + "userlimit_first"); + conf.set(CapacitySchedulerConfiguration.PREFIX + + "root.b." + CapacitySchedulerConfiguration.ORDERING_POLICY, "fair"); + conf.setBoolean( + CapacitySchedulerConfiguration.IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + true); + + String labelsConfig = "=61440:600,true;"; + String nodesConfig = // n1 has no label + "n1= res=61440:600"; + String queuesConfig = + // guaranteed,max,used,pending,reserved + "root(=[61440:600 61440:600 61440:600 30720:30 0]);" + // root + "-a(=[30720:300 61440:600 0:0 0:0 0]);" + // a + "-b(=[30720:300 61440:600 61440:60 30720:30 0]);"; // b + + String appsConfig = + "b\t" + "(1,1024:1,n1,,60,false,0:0,user1);" + // app1 in b + "b\t" + "(1,0:0,n1,,0,false,30720:30,user3);"; // app2 in b + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + Resource ul = Resource.newInstance(30720, 300); + when(((LeafQueue)(cs.getQueue("root.b"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(6)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + reset(eventHandler); + + conf.setBoolean( + CapacitySchedulerConfiguration.IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + false); + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + when(((LeafQueue)(cs.getQueue("root.b"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + verify(eventHandler, times(29)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + } + @Test public void testIntraQueuePreemptionWithDominantVCoreResource() throws IOException { From 7a4b30766b5ac9efa3a3023f16131d10d9b87d0f Mon Sep 17 00:00:00 2001 From: Jonathan Hung Date: Thu, 25 Feb 2021 15:08:43 -0800 Subject: [PATCH 040/666] YARN-10651. CapacityScheduler crashed with NPE in AbstractYarnScheduler.updateNodeResource(). 
Contributed by Haibo Chen --- .../resourcemanager/scheduler/AbstractYarnScheduler.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index f95b30b1e5ae6..97e25d5cafbb9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -848,6 +848,11 @@ public void updateNodeResource(RMNode nm, writeLock.lock(); try { SchedulerNode node = getSchedulerNode(nm.getNodeID()); + if (node == null) { + LOG.info("Node: " + nm.getNodeID() + " has already been taken out of " + + "scheduling. Skip updating its resource"); + return; + } Resource newResource = resourceOption.getResource(); final int timeout = resourceOption.getOverCommitTimeout(); Resource oldResource = node.getTotalResource(); From 97f576668b97ef1fe84e641ec5d057a686a26000 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Fri, 26 Feb 2021 19:03:59 +0000 Subject: [PATCH 041/666] YARN-10653. Fixed the findbugs issues introduced by YARN-10647. Contributed by Qi Zhu --- .../apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index 5b9382fcff2e3..c19faaaad3fcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -646,7 +646,7 @@ protected void internalUpdateLabelsOnNodes( host.labels.addAll(labels); for (Node node : host.nms.values()) { replaceNodeForLabels(node.nodeId, node.labels, labels); - if (node.labels != null && labels != null) { + if (node.labels != null) { replaceLabelsForNode(node.nodeId, node.labels, labels); } node.labels = null; From ba674184931bb5fa2cb1c0f89c36894f000c3789 Mon Sep 17 00:00:00 2001 From: Ahmed Hussein <50450311+amahussein@users.noreply.github.com> Date: Fri, 26 Feb 2021 13:42:33 -0600 Subject: [PATCH 042/666] MAPREDUCE-7320. organize test directories for ClusterMapReduceTestCase (#2722). 
Contributed by Ahmed Hussein --- .../apache/hadoop/test/GenericTestUtils.java | 16 +++++ .../org/apache/hadoop/util/JarFinder.java | 8 ++- .../mapred/ClusterMapReduceTestCase.java | 62 +++++++------------ .../mapred/MiniMRClientClusterFactory.java | 5 +- .../apache/hadoop/mapred/TestBadRecords.java | 9 ++- .../mapred/TestClusterMapReduceTestCase.java | 9 ++- .../org/apache/hadoop/mapred/TestJobName.java | 7 +++ .../hadoop/mapred/TestMRCJCJobClient.java | 9 ++- .../hadoop/mapreduce/TestMRJobClient.java | 6 ++ .../security/ssl/TestEncryptedShuffle.java | 30 ++++----- .../mapreduce/v2/MiniMRYarnCluster.java | 5 ++ .../streaming/TestStreamingBadRecords.java | 9 ++- .../hadoop/yarn/server/MiniYARNCluster.java | 10 ++- 13 files changed, 118 insertions(+), 67 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index ade6cb4c7b8a0..e266f285685ac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -229,6 +229,22 @@ public static int uniqueSequenceId() { return sequence.incrementAndGet(); } + /** + * Creates a directory for the data/logs of the unit test. + * It first deletes the directory if it exists. + * + * @param testClass the unit test class. + * @return the Path of the root directory. + */ + public static File setupTestRootDir(Class testClass) { + File testRootDir = getTestDir(testClass.getSimpleName()); + if (testRootDir.exists()) { + FileUtil.fullyDelete(testRootDir); + } + testRootDir.mkdirs(); + return testRootDir; + } + /** * Get the (created) base directory for tests. * @return the absolute directory diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java index 5e0bfc2399270..3f1bb2d61dc92 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java @@ -132,6 +132,10 @@ private static void createJar(File dir, File jarFile) throws IOException { * @return path to the Jar containing the class. */ public static String getJar(Class klass) { + return getJar(klass, null); + } + + public static String getJar(Class klass, String testSubDir) { Preconditions.checkNotNull(klass, "klass"); ClassLoader loader = klass.getClassLoader(); if (loader != null) { @@ -154,7 +158,9 @@ else if ("file".equals(url.getProtocol())) { klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); File baseDir = new File(path); - File testDir = GenericTestUtils.getTestDir(); + File testDir = + testSubDir == null ? 
GenericTestUtils.getTestDir() + : GenericTestUtils.getTestDir(testSubDir); testDir = testDir.getAbsoluteFile(); if (!testDir.exists()) { testDir.mkdirs(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java index 8d33b1580a8f8..f16b8a0f18fc2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java @@ -20,9 +20,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.GenericTestUtils; + import org.junit.After; import org.junit.Before; +import java.io.File; import java.io.IOException; import java.util.Map; import java.util.Properties; @@ -43,8 +46,18 @@ * The DFS filesystem is formated before the testcase starts and after it ends. */ public abstract class ClusterMapReduceTestCase { + private static File testRootDir; + private static File dfsFolder; + private MiniDFSCluster dfsCluster = null; - private MiniMRCluster mrCluster = null; + private MiniMRClientCluster mrCluster = null; + + protected static void setupClassBase(Class testClass) throws Exception { + // setup the test root directory + testRootDir = GenericTestUtils.setupTestRootDir(testClass); + dfsFolder = new File(testRootDir, "dfs"); + } + /** * Creates Hadoop Cluster and DFS before a test case is run. 
@@ -78,37 +91,10 @@ protected synchronized void startCluster(boolean reformatDFS, Properties props) conf.set((String) entry.getKey(), (String) entry.getValue()); } } - dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2) - .format(reformatDFS).racks(null).build(); - - ConfigurableMiniMRCluster.setConfiguration(props); - //noinspection deprecation - mrCluster = new ConfigurableMiniMRCluster(2, - getFileSystem().getUri().toString(), 1, conf); - } - } - - private static class ConfigurableMiniMRCluster extends MiniMRCluster { - private static Properties config; - - public static void setConfiguration(Properties props) { - config = props; - } - - public ConfigurableMiniMRCluster(int numTaskTrackers, String namenode, - int numDir, JobConf conf) - throws Exception { - super(0,0, numTaskTrackers, namenode, numDir, null, null, null, conf); - } - - public JobConf createJobConf() { - JobConf conf = super.createJobConf(); - if (config != null) { - for (Map.Entry entry : config.entrySet()) { - conf.set((String) entry.getKey(), (String) entry.getValue()); - } - } - return conf; + dfsCluster = + new MiniDFSCluster.Builder(conf, dfsFolder) + .numDataNodes(2).format(reformatDFS).racks(null).build(); + mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf); } } @@ -125,7 +111,7 @@ public JobConf createJobConf() { */ protected void stopCluster() throws Exception { if (mrCluster != null) { - mrCluster.shutdown(); + mrCluster.stop(); mrCluster = null; } if (dfsCluster != null) { @@ -157,17 +143,13 @@ protected FileSystem getFileSystem() throws IOException { return dfsCluster.getFileSystem(); } - protected MiniMRCluster getMRCluster() { - return mrCluster; - } - /** * Returns the path to the root directory for the testcase. * * @return path to the root directory for the testcase. */ protected Path getTestRootDir() { - return new Path("x").getParent(); + return new Path(testRootDir.getPath()); } /** @@ -194,8 +176,8 @@ protected Path getOutputDir() { * * @return configuration that works on the testcase Hadoop instance */ - protected JobConf createJobConf() { - return mrCluster.createJobConf(); + protected JobConf createJobConf() throws IOException { + return new JobConf(mrCluster.getConfig()); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java index 85c534bfb888a..33b85b92042cf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java @@ -55,7 +55,8 @@ public static MiniMRClientCluster create(Class caller, String identifier, Path appJar = new Path(testRootDir, "MRAppJar.jar"); // Copy MRAppJar and make it private. 
- Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR); + Path appMasterJar = + new Path(MiniMRYarnCluster.copyAppJarIntoTestDir(identifier)); fs.copyFromLocalFile(appMasterJar, appJar); fs.setPermission(appJar, new FsPermission("744")); @@ -64,7 +65,7 @@ public static MiniMRClientCluster create(Class caller, String identifier, job.addFileToClassPath(appJar); - Path callerJar = new Path(JarFinder.getJar(caller)); + Path callerJar = new Path(JarFinder.getJar(caller, identifier)); Path remoteCallerJar = new Path(testRootDir, callerJar.getName()); fs.copyFromLocalFile(callerJar, remoteCallerJar); fs.setPermission(remoteCallerJar, new FsPermission("744")); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java index b45a2a6f8cfb4..1b39583bd729f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java @@ -37,6 +37,8 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.util.ReflectionUtils; + +import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; @@ -58,7 +60,12 @@ public class TestBadRecords extends ClusterMapReduceTestCase { Arrays.asList("hello08","hello10"); private List input; - + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestBadRecords.class); + } + public TestBadRecords() { input = new ArrayList(); for(int i=1;i<=10;i++) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java index f04fbd7a29af2..b4e8de2723c57 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java @@ -29,6 +29,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; + +import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -36,6 +38,12 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertFalse; public class TestClusterMapReduceTestCase extends ClusterMapReduceTestCase { + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestClusterMapReduceTestCase.class); + } + public void _testMapReduce(boolean restart) throws Exception { OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt")); Writer wr = new OutputStreamWriter(os); @@ -88,7 +96,6 @@ public void _testMapReduce(boolean restart) throws Exception { reader.close(); assertEquals(4, counter); } - } @Test diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java index 2659a14a70b1d..f50089af4a1e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java @@ -29,12 +29,19 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.lib.IdentityMapper; + +import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; public class TestJobName extends ClusterMapReduceTestCase { + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestJobName.class); + } + @Test public void testComplexName() throws Exception { OutputStream os = getFileSystem().create(new Path(getInputDir(), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java index 9a2af0c7be9ea..9a2c744d8c656 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java @@ -29,10 +29,17 @@ import org.apache.hadoop.mapreduce.TestMRJobClient; import org.apache.hadoop.mapreduce.tools.CLI; import org.apache.hadoop.util.Tool; + +import org.junit.BeforeClass; import org.junit.Ignore; @Ignore public class TestMRCJCJobClient extends TestMRJobClient { - + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestMRCJCJobClient.class); + } + private String runJob() throws Exception { OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt")); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java index f4ccc569af963..31b90aa0e506c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java @@ -30,6 +30,7 @@ import org.apache.hadoop.util.ToolRunner; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; +import org.junit.BeforeClass; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +64,11 @@ public class TestMRJobClient extends ClusterMapReduceTestCase { private static final Logger LOG = LoggerFactory.getLogger(TestMRJobClient.class); + @BeforeClass + public static void setupClass() throws Exception { + 
setupClassBase(TestMRJobClient.class); + } + private Job runJob(Configuration conf) throws Exception { String input = "hello1\nhello2\nhello3\n"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java index d870d25b9af75..ed80f658f6d96 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java @@ -19,7 +19,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -31,58 +30,55 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.After; -import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.Assert; -import java.io.BufferedReader; import java.io.File; -import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.Writer; -import java.net.URL; public class TestEncryptedShuffle { - private static final String BASEDIR = - System.getProperty("test.build.dir", "target/test-dir") + "/" + - TestEncryptedShuffle.class.getSimpleName(); - - private String classpathDir; + private static File testRootDir; @BeforeClass public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); + testRootDir = + GenericTestUtils.setupTestRootDir(TestEncryptedShuffle.class); } @Before public void createCustomYarnClasspath() throws Exception { classpathDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class); new File(classpathDir, "core-site.xml").delete(); + dfsFolder = new File(testRootDir, String.format("dfs-%d", + Time.monotonicNow())); } @After public void cleanUpMiniClusterSpecialConfig() throws Exception { new File(classpathDir, "core-site.xml").delete(); - String keystoresDir = new File(BASEDIR).getAbsolutePath(); + String keystoresDir = testRootDir.getAbsolutePath(); KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, classpathDir); } + private String classpathDir; private MiniDFSCluster dfsCluster = null; private MiniMRClientCluster mrCluster = null; + private File dfsFolder; private void startCluster(Configuration conf) throws Exception { if (System.getProperty("hadoop.log.dir") == null) { - System.setProperty("hadoop.log.dir", "target/test-dir"); + System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath()); } conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions", "true"); @@ -92,7 +88,7 @@ private void startCluster(Configuration conf) throws Exception { YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) + File.pathSeparator + 
classpathDir; conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp); - dfsCluster = new MiniDFSCluster.Builder(conf).build(); + dfsCluster = new MiniDFSCluster.Builder(conf, dfsFolder).build(); FileSystem fileSystem = dfsCluster.getFileSystem(); fileSystem.mkdirs(new Path("/tmp")); fileSystem.mkdirs(new Path("/user")); @@ -129,7 +125,7 @@ private void encryptedShuffleWithCerts(boolean useClientCerts) throws Exception { try { Configuration conf = new Configuration(); - String keystoresDir = new File(BASEDIR).getAbsolutePath(); + String keystoresDir = testRootDir.getAbsolutePath(); String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index dbd87e24914a8..e41c95c490358 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -78,6 +78,7 @@ public MiniMRYarnCluster(String testName) { this(testName, 1); } + @SuppressWarnings("deprecation") public MiniMRYarnCluster(String testName, int noOfNMs) { this(testName, noOfNMs, false); } @@ -87,6 +88,10 @@ public MiniMRYarnCluster(String testName, int noOfNMs, boolean enableAHS) { super(testName, 1, noOfNMs, 4, 4, enableAHS); } + public static String copyAppJarIntoTestDir(String testSubdir) { + return JarFinder.getJar(LocalContainerLauncher.class, testSubdir); + } + public static String getResolvedMRHistoryWebAppURLWithoutScheme( Configuration conf, boolean isSSLEnabled) { InetSocketAddress address = null; diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java index 0ef1ff0b32872..5a4e3a960d2be 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java @@ -31,13 +31,13 @@ import java.util.Properties; import java.util.StringTokenizer; +import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.ClusterMapReduceTestCase; import org.apache.hadoop.mapred.Counters; -import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.SkipBadRecords; import org.apache.hadoop.mapred.Utils; @@ -65,7 +65,12 @@ public class TestStreamingBadRecords extends ClusterMapReduceTestCase private static final String badReducer = UtilTest.makeJavaCommand(BadApp.class, new String[]{"true"}); private static final int INPUTSIZE=100; - + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestStreamingBadRecords.class); + } + public TestStreamingBadRecords() throws IOException { UtilTest utilTest = new UtilTest(getClass().getName()); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index 352a2d0d64082..828b5c43af4cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -41,8 +41,10 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.conf.HAUtil; @@ -171,8 +173,11 @@ public MiniYARNCluster( this.numLocalDirs = numLocalDirs; this.numLogDirs = numLogDirs; this.enableAHS = enableAHS; - String testSubDir = testName.replace("$", ""); - File targetWorkDir = new File("target", testSubDir); + String yarnFolderName = String.format("yarn-%d", Time.monotonicNow()); + File targetWorkDirRoot = GenericTestUtils.getTestDir(getName()); + // make sure that the folder exists + targetWorkDirRoot.mkdirs(); + File targetWorkDir = new File(targetWorkDirRoot, yarnFolderName); try { FileContext.getLocalFSFileContext().delete( new Path(targetWorkDir.getAbsolutePath()), true); @@ -227,6 +232,7 @@ public MiniYARNCluster( * @param numLocalDirs the number of nm-local-dirs per nodemanager * @param numLogDirs the number of nm-log-dirs per nodemanager */ + @SuppressWarnings("deprecation") public MiniYARNCluster( String testName, int numResourceManagers, int numNodeManagers, int numLocalDirs, int numLogDirs) { From 02021e441afc1f20e83fbe2494f53f09ab844e1d Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 26 Feb 2021 21:57:54 +0100 Subject: [PATCH 043/666] YARN-10627. Extend logging to give more information about weight mode. Contributed by Benjamin Teke. 
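The gist of this change: queue initialization logging and toString() now branch on whether a queue is configured with a weight (weight != -1) or with a percentage capacity, so weight-mode queues report their weight and normalized weight instead of a percentage. A minimal, standalone sketch of that branching (simplified names and illustrative values only, not the actual Hadoop classes) looks like this:

    // Sketch only: mirrors the branching of getCapacityOrWeightString()
    // added in the diff below. A weight of -1 is taken to mean
    // "weight not configured", as in the patch.
    public class CapacityOrWeightLogSketch {
      static String capacityOrWeightString(float weight,
          float normalizedWeight, float capacity) {
        if (weight != -1) {
          return "weight=" + weight
              + ", normalizedWeight=" + normalizedWeight;
        }
        return "capacity=" + capacity;
      }

      public static void main(String[] args) {
        // Illustrative values, not taken from a real cluster.
        System.out.println("root.a: "
            + capacityOrWeightString(-1, 0, 0.3f));   // percentage mode
        System.out.println("root.b: "
            + capacityOrWeightString(6f, 0.75f, 0));  // weight mode
      }
    }

In the patch itself the values come from QueueCapacities, via getCapacityOrWeightString() in AbstractCSQueue and the extended variant in LeafQueue, as shown in the diff below.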
--- .../scheduler/capacity/AbstractCSQueue.java | 9 ++ .../scheduler/capacity/LeafQueue.java | 19 ++++- .../scheduler/capacity/ParentQueue.java | 6 +- .../TestCapacitySchedulerWeightMode.java | 84 +++++++++++++++---- 4 files changed, 95 insertions(+), 23 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 12ce05f2791bb..28e2d54c49426 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -1633,4 +1633,13 @@ public void setDynamicQueue(boolean dynamicQueue) { writeLock.unlock(); } } + + protected String getCapacityOrWeightString() { + if (queueCapacities.getWeight() != -1) { + return "weight=" + queueCapacities.getWeight() + ", " + + "normalizedWeight=" + queueCapacities.getNormalizedWeight(); + } else { + return "capacity=" + queueCapacities.getCapacity(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 6bf8d0a471ab8..104a89caee0c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -268,9 +268,8 @@ protected void setupQueueConfigs(Resource clusterResource, usersManager.updateUserWeights(); LOG.info( - "Initializing " + getQueuePath() + "\n" + "capacity = " - + queueCapacities.getCapacity() - + " [= (float) configuredCapacity / 100 ]" + "\n" + "Initializing " + getQueuePath() + "\n" + + getExtendedCapacityOrWeightString() + "\n" + "absoluteCapacity = " + queueCapacities.getAbsoluteCapacity() + " [= parentAbsoluteCapacity * capacity ]" + "\n" + "maxCapacity = " + queueCapacities.getMaximumCapacity() @@ -486,7 +485,7 @@ public QueueInfo getQueueInfo( public String toString() { readLock.lock(); try { - return getQueuePath() + ": " + "capacity=" + queueCapacities.getCapacity() + return getQueuePath() + ": " + getCapacityOrWeightString() + ", " + "absoluteCapacity=" + queueCapacities.getAbsoluteCapacity() + ", " + "usedResources=" + queueUsage.getUsed() + ", " + "usedCapacity=" + getUsedCapacity() + ", " + "absoluteUsedCapacity=" @@ -499,7 +498,19 @@ public String toString() { } finally { readLock.unlock(); } + } + protected String getExtendedCapacityOrWeightString() { + if (queueCapacities.getWeight() != -1) { + return "weight = " + queueCapacities.getWeight() + + " [= (float) configuredCapacity (with w suffix)] " + "\n" + + "normalizedWeight = " + 
queueCapacities.getNormalizedWeight() + + " [= (float) configuredCapacity / sum(configuredCapacity of " + + "all queues under the parent)]"; + } else { + return "capacity = " + queueCapacities.getCapacity() + + " [= (float) configuredCapacity / 100 ]"; + } } @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index f79ffcbb33c52..ce5e49040e678 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -173,7 +173,7 @@ protected void setupQueueConfigs(Resource clusterResource, ((ParentQueue) parent).getQueueOrderingPolicyConfigName()); queueOrderingPolicy.setQueues(childQueues); - LOG.info(queueName + ", capacity=" + this.queueCapacities.getCapacity() + LOG.info(queueName + ", " + getCapacityOrWeightString() + ", absoluteCapacity=" + this.queueCapacities.getAbsoluteCapacity() + ", maxCapacity=" + this.queueCapacities.getMaximumCapacity() + ", absoluteMaxCapacity=" + this.queueCapacities @@ -462,8 +462,8 @@ public List getQueueUserAclInfo( public String toString() { return queueName + ": " + - "numChildQueue= " + childQueues.size() + ", " + - "capacity=" + queueCapacities.getCapacity() + ", " + + "numChildQueue= " + childQueues.size() + ", " + + getCapacityOrWeightString() + ", " + "absoluteCapacity=" + queueCapacities.getAbsoluteCapacity() + ", " + "usedResources=" + queueUsage.getUsed() + "usedCapacity=" + getUsedCapacity() + ", " + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java index bdf4d8d455e09..1742ae45f35ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java @@ -45,11 +45,16 @@ import org.junit.Before; import org.junit.Test; +import java.io.IOException; import java.util.ArrayList; import java.util.Set; public class TestCapacitySchedulerWeightMode { - private final int GB = 1024; + private static final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + private static final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + private static final String A1 = A + ".a1"; + private static final String B1 = B + ".b1"; + private static final String B2 = B + ".b2"; private YarnConfiguration conf; @@ -91,14 +96,12 @@ public static Configuration getCSConfWithQueueLabelsWeightOnly( 
conf.setLabeledQueueWeight(CapacitySchedulerConfiguration.ROOT, "y", 100); conf.setLabeledQueueWeight(CapacitySchedulerConfiguration.ROOT, "z", 100); - final String A = CapacitySchedulerConfiguration.ROOT + ".a"; conf.setLabeledQueueWeight(A, RMNodeLabelsManager.NO_LABEL, 1); conf.setMaximumCapacity(A, 10); conf.setAccessibleNodeLabels(A, toSet("x", "y")); conf.setLabeledQueueWeight(A, "x", 100); conf.setLabeledQueueWeight(A, "y", 50); - final String B = CapacitySchedulerConfiguration.ROOT + ".b"; conf.setLabeledQueueWeight(B, RMNodeLabelsManager.NO_LABEL, 9); conf.setMaximumCapacity(B, 100); conf.setAccessibleNodeLabels(B, toSet("y", "z")); @@ -106,7 +109,6 @@ public static Configuration getCSConfWithQueueLabelsWeightOnly( conf.setLabeledQueueWeight(B, "z", 100); // Define 2nd-level queues - final String A1 = A + ".a1"; conf.setQueues(A, new String[] { "a1" }); conf.setLabeledQueueWeight(A1, RMNodeLabelsManager.NO_LABEL, 100); conf.setMaximumCapacity(A1, 100); @@ -116,12 +118,10 @@ public static Configuration getCSConfWithQueueLabelsWeightOnly( conf.setLabeledQueueWeight(A1, "y", 100); conf.setQueues(B, new String[] { "b1", "b2" }); - final String B1 = B + ".b1"; conf.setLabeledQueueWeight(B1, RMNodeLabelsManager.NO_LABEL, 50); conf.setMaximumCapacity(B1, 50); conf.setAccessibleNodeLabels(B1, RMNodeLabelsManager.EMPTY_STRING_SET); - final String B2 = B + ".b2"; conf.setLabeledQueueWeight(B2, RMNodeLabelsManager.NO_LABEL, 50); conf.setMaximumCapacity(B2, 50); conf.setAccessibleNodeLabels(B2, toSet("y", "z")); @@ -155,14 +155,12 @@ public static Configuration getCSConfWithLabelsParentUseWeightChildUsePct( conf.setLabeledQueueWeight(CapacitySchedulerConfiguration.ROOT, "y", 100); conf.setLabeledQueueWeight(CapacitySchedulerConfiguration.ROOT, "z", 100); - final String A = CapacitySchedulerConfiguration.ROOT + ".a"; conf.setLabeledQueueWeight(A, RMNodeLabelsManager.NO_LABEL, 1); conf.setMaximumCapacity(A, 10); conf.setAccessibleNodeLabels(A, toSet("x", "y")); conf.setLabeledQueueWeight(A, "x", 100); conf.setLabeledQueueWeight(A, "y", 50); - final String B = CapacitySchedulerConfiguration.ROOT + ".b"; conf.setLabeledQueueWeight(B, RMNodeLabelsManager.NO_LABEL, 9); conf.setMaximumCapacity(B, 100); conf.setAccessibleNodeLabels(B, toSet("y", "z")); @@ -170,7 +168,6 @@ public static Configuration getCSConfWithLabelsParentUseWeightChildUsePct( conf.setLabeledQueueWeight(B, "z", 100); // Define 2nd-level queues - final String A1 = A + ".a1"; conf.setQueues(A, new String[] { "a1" }); conf.setCapacityByLabel(A1, RMNodeLabelsManager.NO_LABEL, 100); conf.setMaximumCapacity(A1, 100); @@ -180,12 +177,10 @@ public static Configuration getCSConfWithLabelsParentUseWeightChildUsePct( conf.setCapacityByLabel(A1, "y", 100); conf.setQueues(B, new String[] { "b1", "b2" }); - final String B1 = B + ".b1"; conf.setCapacityByLabel(B1, RMNodeLabelsManager.NO_LABEL, 50); conf.setMaximumCapacity(B1, 50); conf.setAccessibleNodeLabels(B1, RMNodeLabelsManager.EMPTY_STRING_SET); - final String B2 = B + ".b2"; conf.setCapacityByLabel(B2, RMNodeLabelsManager.NO_LABEL, 50); conf.setMaximumCapacity(B2, 50); conf.setAccessibleNodeLabels(B2, toSet("y", "z")); @@ -219,14 +214,12 @@ public static Configuration getCSConfWithLabelsParentUsePctChildUseWeight( conf.setLabeledQueueWeight(CapacitySchedulerConfiguration.ROOT, "y", 100); conf.setLabeledQueueWeight(CapacitySchedulerConfiguration.ROOT, "z", 100); - final String A = CapacitySchedulerConfiguration.ROOT + ".a"; conf.setCapacityByLabel(A, RMNodeLabelsManager.NO_LABEL, 10); 
conf.setMaximumCapacity(A, 10); conf.setAccessibleNodeLabels(A, toSet("x", "y")); conf.setCapacityByLabel(A, "x", 100); conf.setCapacityByLabel(A, "y", 50); - final String B = CapacitySchedulerConfiguration.ROOT + ".b"; conf.setCapacityByLabel(B, RMNodeLabelsManager.NO_LABEL, 90); conf.setMaximumCapacity(B, 100); conf.setAccessibleNodeLabels(B, toSet("y", "z")); @@ -234,7 +227,6 @@ public static Configuration getCSConfWithLabelsParentUsePctChildUseWeight( conf.setCapacityByLabel(B, "z", 100); // Define 2nd-level queues - final String A1 = A + ".a1"; conf.setQueues(A, new String[] { "a1" }); conf.setCapacityByLabel(A1, RMNodeLabelsManager.NO_LABEL, 100); conf.setMaximumCapacity(A1, 100); @@ -244,12 +236,10 @@ public static Configuration getCSConfWithLabelsParentUsePctChildUseWeight( conf.setCapacityByLabel(A1, "y", 100); conf.setQueues(B, new String[] { "b1", "b2" }); - final String B1 = B + ".b1"; conf.setCapacityByLabel(B1, RMNodeLabelsManager.NO_LABEL, 50); conf.setMaximumCapacity(B1, 50); conf.setAccessibleNodeLabels(B1, RMNodeLabelsManager.EMPTY_STRING_SET); - final String B2 = B + ".b2"; conf.setCapacityByLabel(B2, RMNodeLabelsManager.NO_LABEL, 50); conf.setMaximumCapacity(B2, 50); conf.setAccessibleNodeLabels(B2, toSet("y", "z")); @@ -297,6 +287,57 @@ public void testContainerAllocateWithComplexLabelsWeightAndPercentMixed2() throw getCSConfWithLabelsParentUsePctChildUseWeight(conf)); } + /** + * This checks whether the parent prints the correct log about the + * configured mode. + */ + @Test(timeout = 300000) + public void testGetCapacityOrWeightStringUsingWeights() throws IOException { + try (MockRM rm = new MockRM( + getCSConfWithQueueLabelsWeightOnly(conf))) { + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + + String capacityOrWeightString = ((ParentQueue) cs.getQueue(A)) + .getCapacityOrWeightString(); + validateCapacityOrWeightString(capacityOrWeightString, true); + + capacityOrWeightString = ((LeafQueue) cs.getQueue(A1)) + .getCapacityOrWeightString(); + validateCapacityOrWeightString(capacityOrWeightString, true); + + capacityOrWeightString = ((LeafQueue) cs.getQueue(A1)) + .getExtendedCapacityOrWeightString(); + validateCapacityOrWeightString(capacityOrWeightString, true); + } + } + + /** + * This checks whether the parent prints the correct log about the + * configured mode. 
+ */ + @Test(timeout = 300000) + public void testGetCapacityOrWeightStringParentPctLeafWeights() + throws IOException { + try (MockRM rm = new MockRM( + getCSConfWithLabelsParentUseWeightChildUsePct(conf))) { + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + + String capacityOrWeightString = ((ParentQueue) cs.getQueue(A)) + .getCapacityOrWeightString(); + validateCapacityOrWeightString(capacityOrWeightString, true); + + capacityOrWeightString = ((LeafQueue) cs.getQueue(A1)) + .getCapacityOrWeightString(); + validateCapacityOrWeightString(capacityOrWeightString, false); + + capacityOrWeightString = ((LeafQueue) cs.getQueue(A1)) + .getExtendedCapacityOrWeightString(); + validateCapacityOrWeightString(capacityOrWeightString, false); + } + } + private void internalTestContainerAlloationWithNodeLabel(Configuration csConf) throws Exception { /* @@ -449,4 +490,15 @@ private void checkTaskContainersHost(ApplicationAttemptId attemptId, } } } + + private void validateCapacityOrWeightString(String capacityOrWeightString, + boolean shouldContainWeight) { + Assert.assertEquals(shouldContainWeight, + capacityOrWeightString.contains("weight")); + Assert.assertEquals(shouldContainWeight, + capacityOrWeightString.contains("normalizedWeight")); + Assert.assertEquals(!shouldContainWeight, + capacityOrWeightString.contains("capacity")); + + } } From c3b2f54124fe55f6040b28c1effce1c3e2bfa559 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Sat, 27 Feb 2021 12:46:36 +0900 Subject: [PATCH 044/666] YARN-10656. Parsing error in CapacityScheduler.md (#2725) Reviewed-by: Peter Bacsko --- .../hadoop-yarn-site/src/site/markdown/CapacityScheduler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index 8acf7230302ca..1dec2da113e4b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -126,7 +126,7 @@ Configuration | `yarn.scheduler.capacity..capacity` | Queue *capacity* in percentage (%) as a float (e.g. 12.5) OR as absolute resource queue minimum capacity. The sum of capacities for all queues, at each level, must be equal to 100. However if absolute resource is configured, sum of absolute resources of child queues could be less than it's parent absolute resource capacity. Applications in the queue may consume more resources than the queue's capacity if there are free resources, providing elasticity. | | `yarn.scheduler.capacity..maximum-capacity` | Maximum queue capacity in percentage (%) as a float OR as absolute resource queue maximum capacity. This limits the *elasticity* for applications in the queue. 1) Value is between 0 and 100. 2) Admin needs to make sure absolute maximum capacity >= absolute capacity for each queue. Also, setting this value to -1 sets maximum capacity to 100%. | | `yarn.scheduler.capacity..minimum-user-limit-percent` | Each queue enforces a limit on the percentage of resources allocated to a user at any given time, if there is demand for resources. The user limit can vary between a minimum and maximum value. The former (the minimum value) is set to this property value and the latter (the maximum value) depends on the number of users who have submitted applications. For e.g., suppose the value of this property is 25. 
If two users have submitted applications to a queue, no single user can use more than 50% of the queue resources. If a third user submits an application, no single user can use more than 33% of the queue resources. With 4 or more users, no user can use more than 25% of the queues resources. A value of 100 implies no user limits are imposed. The default is 100. Value is specified as a integer. | -| `yarn.scheduler.capacity..user-limit-factor` | User limit factor provides a way to control the max amount of resources that a single user can consume. It is the multiple of the queue's capacity. By default this is set to 1 which ensures that a single user can never take more than the queue's configured capacity irrespective of how idle the cluster is. Increasing it means a single user can use more than the minimum capacity of the cluster, while decreasing it results in lower maximum resources. Setting this to -1 will disable the feature. Value is specified as a float. Note: using the flexible auto queue creation (yarn.scheduler.capacity..auto-queue-creation-v2) with weights will automatically set this property to -1, as the dynamic queues will be created with the hardcoded weight of 1 and in idle cluster scenarios they should be able to use more resources than calculated. | +| `yarn.scheduler.capacity..user-limit-factor` | User limit factor provides a way to control the max amount of resources that a single user can consume. It is the multiple of the queue's capacity. By default this is set to 1 which ensures that a single user can never take more than the queue's configured capacity irrespective of how idle the cluster is. Increasing it means a single user can use more than the minimum capacity of the cluster, while decreasing it results in lower maximum resources. Setting this to -1 will disable the feature. Value is specified as a float. Note: using the flexible auto queue creation (yarn.scheduler.capacity.\.auto-queue-creation-v2) with weights will automatically set this property to -1, as the dynamic queues will be created with the hardcoded weight of 1 and in idle cluster scenarios they should be able to use more resources than calculated. | | `yarn.scheduler.capacity..maximum-allocation-mb` | The per queue maximum limit of memory to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-mb`. This value must be smaller than or equal to the cluster maximum. | | `yarn.scheduler.capacity..maximum-allocation-vcores` | The per queue maximum limit of virtual cores to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-vcores`. This value must be smaller than or equal to the cluster maximum. | | `yarn.scheduler.capacity..user-settings..weight` | This floating point value is used when calculating the user limit resource values for users in a queue. This value will weight each user more or less than the other users in the queue. For example, if user A should receive 50% more resources in a queue than users B and C, this property will be set to 1.5 for user A. Users B and C will default to 1.0. | From 581d4d7ae357463085dc2eb6155b89123c46ff41 Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Mon, 1 Mar 2021 11:36:41 +0000 Subject: [PATCH 045/666] HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. 
Contributed by Stephen O'Donnell --- .../hadoop/ha/ZKFailoverController.java | 18 +++++++- .../tools/TestDFSZKFailoverController.java | 41 +++++++++++++++---- 2 files changed, 49 insertions(+), 10 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index 16d7bf7ba4b5d..10459404a247a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -31,11 +31,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; +import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.util.ZKUtil; import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.ha.HealthMonitor.State; @@ -343,8 +346,19 @@ private void initZK() throws HadoopIllegalArgumentException, IOException, zkAcls = Ids.CREATOR_ALL_ACL; } - // Parse authentication from configuration. - List zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY); + // Parse authentication from configuration. Exclude any Credential providers + // using the hdfs scheme to avoid a circular dependency. As HDFS is likely + // not started when ZKFC is started, we cannot read the credentials from it. + Configuration c = conf; + try { + c = ProviderUtils.excludeIncompatibleCredentialProviders( + conf, FileSystem.getFileSystemClass("hdfs", conf)); + } catch (UnsupportedFileSystemException e) { + // Should not happen in a real cluster, as the hdfs FS will always be + // present. Inside tests, the hdfs filesystem will not be present + LOG.debug("No filesystem found for the hdfs scheme", e); + } + List zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY); // Sanity check configuration. 
Preconditions.checkArgument(zkQuorum != null, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java index 8f60b1dac9985..0a7a87ca88f2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; @@ -93,14 +94,16 @@ public void setup() throws Exception { ServerSocketUtil.getPort(10023, 100)); conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", ServerSocketUtil.getPort(10024, 100)); + } + private void startCluster() throws Exception { // prefer non-ephemeral port to avoid port collision on restartNameNode MiniDFSNNTopology topology = new MiniDFSNNTopology() - .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1") - .setIpcPort(ServerSocketUtil.getPort(10021, 100))) - .addNN(new MiniDFSNNTopology.NNConf("nn2") - .setIpcPort(ServerSocketUtil.getPort(10022, 100)))); + .addNameservice(new MiniDFSNNTopology.NSConf("ns1") + .addNN(new MiniDFSNNTopology.NNConf("nn1") + .setIpcPort(ServerSocketUtil.getPort(10021, 100))) + .addNN(new MiniDFSNNTopology.NNConf("nn2") + .setIpcPort(ServerSocketUtil.getPort(10022, 100)))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) .numDataNodes(0) @@ -113,16 +116,16 @@ public void setup() throws Exception { thr1.start(); waitForHAState(0, HAServiceState.ACTIVE); - + ctx.addThread(thr2 = new ZKFCThread(ctx, 1)); thr2.start(); - + // Wait for the ZKFCs to fully start up ZKFCTestUtil.waitForHealthState(thr1.zkfc, HealthMonitor.State.SERVICE_HEALTHY, ctx); ZKFCTestUtil.waitForHealthState(thr2.zkfc, HealthMonitor.State.SERVICE_HEALTHY, ctx); - + fs = HATestUtil.configureFailoverFs(cluster, conf); } @@ -147,11 +150,26 @@ public void shutdown() throws Exception { } } + @Test(timeout=60000) + /** + * Ensure the cluster simply starts with a hdfs jceks credential provider + * configured. HDFS-14013. + */ + public void testZFFCStartsWithCredentialProviderReferencingHDFS() + throws Exception{ + // Create a provider path on HDFS + conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, + "jceks://hdfs/tmp/test.jceks"); + // + startCluster(); + } + /** * Test that thread dump is captured after NN state changes. 
*/ @Test(timeout=60000) public void testThreadDumpCaptureAfterNNStateChange() throws Exception { + startCluster(); MockNameNodeResourceChecker mockResourceChecker = new MockNameNodeResourceChecker(conf); mockResourceChecker.setResourcesAvailable(false); @@ -169,6 +187,7 @@ public void testThreadDumpCaptureAfterNNStateChange() throws Exception { */ @Test(timeout=60000) public void testFailoverAndBackOnNNShutdown() throws Exception { + startCluster(); Path p1 = new Path("/dir1"); Path p2 = new Path("/dir2"); @@ -201,6 +220,7 @@ public void testFailoverAndBackOnNNShutdown() throws Exception { @Test(timeout=30000) public void testManualFailover() throws Exception { + startCluster(); thr2.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover(); waitForHAState(0, HAServiceState.STANDBY); waitForHAState(1, HAServiceState.ACTIVE); @@ -212,6 +232,7 @@ public void testManualFailover() throws Exception { @Test(timeout=30000) public void testWithoutBindAddressSet() throws Exception { + startCluster(); DFSZKFailoverController zkfc = DFSZKFailoverController.create( conf); @@ -222,6 +243,7 @@ public void testWithoutBindAddressSet() throws Exception { @Test(timeout=30000) public void testWithBindAddressSet() throws Exception { + startCluster(); conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS); DFSZKFailoverController zkfc = DFSZKFailoverController.create( conf); @@ -239,6 +261,7 @@ public void testWithBindAddressSet() throws Exception { */ @Test public void testObserverRejectZkfcCall() throws Exception { + startCluster(); NamenodeProtocols nn1 = cluster.getNameNode(1).getRpcServer(); nn1.transitionToObserver( new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED)); @@ -251,6 +274,7 @@ public void testObserverRejectZkfcCall() throws Exception { @Test(timeout=30000) public void testManualFailoverWithDFSHAAdmin() throws Exception { + startCluster(); DFSHAAdmin tool = new DFSHAAdmin(); tool.setConf(conf); assertEquals(0, @@ -279,6 +303,7 @@ public void testManualFailoverWithDFSHAAdmin() throws Exception { @Test(timeout=30000) public void testElectionOnObserver() throws Exception{ + startCluster(); InputStream inOriginial = System.in; try { DFSHAAdmin tool = new DFSHAAdmin(); From 2ff92b80b899792dfaf3d1a01a1b5010370ccfae Mon Sep 17 00:00:00 2001 From: litao <55134131+tomscut@users.noreply.github.com> Date: Mon, 1 Mar 2021 23:52:59 +0800 Subject: [PATCH 046/666] HDFS-15854. 
Make some parameters configurable for SlowDiskTracker and SlowPeerTracker (#2718) Authored-by: tomscut --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 8 ++++++++ .../blockmanagement/SlowDiskTracker.java | 7 +++++-- .../blockmanagement/SlowPeerTracker.java | 7 +++++-- .../src/main/resources/hdfs-default.xml | 18 ++++++++++++++++++ 4 files changed, 36 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index d04a8426e1e59..790d1aa19fdfc 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -676,6 +676,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.datanode.slowpeer.low.threshold.ms"; public static final long DFS_DATANODE_SLOWPEER_LOW_THRESHOLD_MS_DEFAULT = 5L; + public static final String DFS_DATANODE_MAX_NODES_TO_REPORT_KEY = + "dfs.datanode.max.nodes.to.report"; + public static final int DFS_DATANODE_MAX_NODES_TO_REPORT_DEFAULT = + 5; public static final String DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY = "dfs.datanode.min.outlier.detection.disks"; public static final long DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_DEFAULT = @@ -684,6 +688,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.datanode.slowdisk.low.threshold.ms"; public static final long DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_DEFAULT = 20L; + public static final String DFS_DATANODE_MAX_DISKS_TO_REPORT_KEY = + "dfs.datanode.max.disks.to.report"; + public static final int DFS_DATANODE_MAX_DISKS_TO_REPORT_DEFAULT = + 5; public static final String DFS_DATANODE_HOST_NAME_KEY = HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_HOST_NAME_KEY; public static final String DFS_NAMENODE_CHECKPOINT_DIR_KEY = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index 08ebf8e481d22..782340185c986 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -77,7 +77,7 @@ public class SlowDiskTracker { * Number of disks to include in JSON report per operation. We will return * disks with the highest latency. 
*/ - private static final int MAX_DISKS_TO_REPORT = 5; + private final int maxDisksToReport; private static final String DATANODE_DISK_SEPARATOR = ":"; private final long reportGenerationIntervalMs; @@ -107,6 +107,9 @@ public SlowDiskTracker(Configuration conf, Timer timer) { DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY, DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); + this.maxDisksToReport = conf.getInt( + DFSConfigKeys.DFS_DATANODE_MAX_DISKS_TO_REPORT_KEY, + DFSConfigKeys.DFS_DATANODE_MAX_DISKS_TO_REPORT_DEFAULT); this.reportValidityMs = reportGenerationIntervalMs * 3; } @@ -153,7 +156,7 @@ public void updateSlowDiskReportAsync(long now) { @Override public void run() { slowDisksReport = getSlowDisks(diskIDLatencyMap, - MAX_DISKS_TO_REPORT, now); + maxDisksToReport, now); cleanUpOldReports(now); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java index 5b30b738c7ab5..06dd2c0102651 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java @@ -79,7 +79,7 @@ public class SlowPeerTracker { * Number of nodes to include in JSON report. We will return nodes with * the highest number of votes from peers. */ - private static final int MAX_NODES_TO_REPORT = 5; + private final int maxNodesToReport; /** * Information about peers that have reported a node as being slow. @@ -103,6 +103,9 @@ public SlowPeerTracker(Configuration conf, Timer timer) { DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY, DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS) * 3; + this.maxNodesToReport = conf.getInt( + DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_KEY, + DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_DEFAULT); } /** @@ -193,7 +196,7 @@ private SortedSet filterNodeReports( */ public String getJson() { Collection validReports = getJsonReports( - MAX_NODES_TO_REPORT); + maxNodesToReport); try { return WRITER.writeValueAsString(validReports); } catch (JsonProcessingException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 0c5bb35d76fd1..56c65b5affaee 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2346,6 +2346,15 @@ + + dfs.datanode.max.nodes.to.report + 5 + + Number of nodes to include in JSON report. We will return nodes with + the highest number of votes from peers. + + + dfs.datanode.outliers.report.interval 30m @@ -2386,6 +2395,15 @@ + + dfs.datanode.max.disks.to.report + 5 + + Number of disks to include in JSON report per operation. We will return + disks with the highest latency. + + + hadoop.user.group.metrics.percentiles.intervals From b7d1d7bfc76048607d8ac37b89527dc4aa70d86b Mon Sep 17 00:00:00 2001 From: Konstantin V Shvachko Date: Mon, 1 Mar 2021 16:48:48 -0800 Subject: [PATCH 047/666] HDFS-15849. ExpiredHeartbeats metric should be of Type.COUNTER. Contributed by Qi Zhu. 
--- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ++- .../test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e98a59d7283e9..22b4b92f447bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4632,7 +4632,8 @@ public long getMissingReplOneBlocksCount() { return blockManager.getMissingReplOneBlocksCount(); } - @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"}) + @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"}, + type = Metric.Type.COUNTER) public int getExpiredHeartbeats() { return datanodeStatistics.getExpiredHeartbeats(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index b4d6fc9950392..69dbf6438af93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; @@ -143,7 +143,7 @@ public void testDatanodeReport() throws Exception { assertReports(1, DatanodeReportType.DEAD, client, datanodes, null); Thread.sleep(5000); - assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem")); + assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem")); } finally { cluster.shutdown(); } From 6e4e56c3693d20656598659278b1acd686e0617e Mon Sep 17 00:00:00 2001 From: zhuqi Date: Tue, 2 Mar 2021 13:16:11 +0800 Subject: [PATCH 048/666] HDFS-15856: Make write pipeline retry times configurable. (#2721). 
Contributed by Qi Zhu Reviewed-by: Ayush Saxena Reviewed-by: Wei-Chiu Chuang Reviewed-by: He Xiaoqiao --- .../java/org/apache/hadoop/hdfs/DataStreamer.java | 14 ++++++++++---- .../hadoop/hdfs/client/HdfsClientConfigKeys.java | 3 +++ .../hadoop/hdfs/client/impl/DfsClientConf.java | 12 ++++++++++++ .../src/main/resources/hdfs-default.xml | 9 +++++++++ 4 files changed, 34 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index df5a479e8b974..96c86c3569b6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -529,6 +529,7 @@ boolean doWaitForRestart() { private static final int CONGESTION_BACK_OFF_MAX_TIME_IN_MS = CONGESTION_BACKOFF_MEAN_TIME_IN_MS * 10; private int lastCongestionBackoffTime; + private int maxPipelineRecoveryRetries; protected final LoadingCache excludedNodes; private final String[] favoredNodes; @@ -557,6 +558,7 @@ private DataStreamer(HdfsFileStatus stat, ExtendedBlock block, this.excludedNodes = initExcludedNodes(conf.getExcludedNodesCacheExpiry()); this.errorState = new ErrorState(conf.getDatanodeRestartTimeout()); this.addBlockFlags = flags; + this.maxPipelineRecoveryRetries = conf.getMaxPipelineRecoveryRetries(); } /** @@ -1263,14 +1265,18 @@ private boolean processDatanodeOrExternalError() throws IOException { packetSendTime.clear(); } - // If we had to recover the pipeline five times in a row for the + // If we had to recover the pipeline more than the value + // defined by maxPipelineRecoveryRetries in a row for the // same packet, this client likely has corrupt data or corrupting // during transmission. - if (!errorState.isRestartingNode() && ++pipelineRecoveryCount > 5) { + if (!errorState.isRestartingNode() && ++pipelineRecoveryCount > + maxPipelineRecoveryRetries) { LOG.warn("Error recovering pipeline for writing " + - block + ". Already retried 5 times for the same packet."); + block + ". Already retried " + maxPipelineRecoveryRetries + + " times for the same packet."); lastException.set(new IOException("Failing write. 
Tried pipeline " + - "recovery 5 times without success.")); + "recovery " + maxPipelineRecoveryRetries + + " times without success.")); streamerClosed = true; return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java index f858080929ebb..c17ad0e861514 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java @@ -83,6 +83,9 @@ public interface HdfsClientConfigKeys { "dfs.namenode.kerberos.principal"; String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size"; int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024; + String DFS_CLIENT_PIPELINE_RECOVERY_MAX_RETRIES = + "dfs.client.pipeline.recovery.max-retries"; + int DFS_CLIENT_PIPELINE_RECOVERY_MAX_RETRIES_DEFAULT = 5; String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout"; String DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY = "dfs.client.socket.send.buffer.size"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java index facbe70589a57..f462dca99329e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java @@ -107,6 +107,7 @@ public class DfsClientConf { private final int maxFailoverAttempts; private final int maxRetryAttempts; + private final int maxPipelineRecoveryRetries; private final int failoverSleepBaseMillis; private final int failoverSleepMaxMillis; private final int maxBlockAcquireFailures; @@ -294,6 +295,10 @@ public DfsClientConf(Configuration conf) { Preconditions.checkArgument(clientShortCircuitNum <= 5, HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM + "can't be more then 5."); + maxPipelineRecoveryRetries = conf.getInt( + HdfsClientConfigKeys.DFS_CLIENT_PIPELINE_RECOVERY_MAX_RETRIES, + HdfsClientConfigKeys.DFS_CLIENT_PIPELINE_RECOVERY_MAX_RETRIES_DEFAULT + ); } private ByteArrayManager.Conf loadWriteByteArrayManagerConf( @@ -698,6 +703,13 @@ public ShortCircuitConf getShortCircuitConf() { return shortCircuitConf; } + /** + *@return the maxPipelineRecoveryRetries + */ + public int getMaxPipelineRecoveryRetries() { + return maxPipelineRecoveryRetries; + } + /** * Configuration for short-circuit reads. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 56c65b5affaee..94ff3ec71ec6a 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4370,6 +4370,15 @@ + + dfs.client.pipeline.recovery.max-retries + 5 + + if the DFS client encounters errors in write pipeline, + retry up to the number defined by this property before giving up. + + + dfs.client.socket-timeout 60000 From 0c28027ec988f029273a5743d4fccdbcf7242718 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Tue, 2 Mar 2021 21:47:31 +0000 Subject: [PATCH 049/666] [MAPREDUCE-7234] ClientHSSecurityInfo class is in wrong META-INF file. 
Contributed by Eric Badger. --- .../META-INF/services/org.apache.hadoop.security.SecurityInfo | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename hadoop-mapreduce-project/hadoop-mapreduce-client/{hadoop-mapreduce-client-jobclient => hadoop-mapreduce-client-common}/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo (100%) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo similarity index 100% rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo From 488b1a789514ceda3b7926730a3616702fa573c1 Mon Sep 17 00:00:00 2001 From: JJiaguodong <272436024@qq.com> Date: Wed, 3 Mar 2021 10:41:05 +0800 Subject: [PATCH 050/666] HADOOP-17560. Fix some spelling errors (#2730) Co-authored-by: jiaguodong5 --- .../hdfs/server/federation/store/records/MountTable.java | 8 ++++---- .../server/federation/store/records/TestMountTable.java | 2 +- .../resourcemanager/webapp/TestRMWebServicesApps.java | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java index 907a4055adb82..bedf37b64c8e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java @@ -54,9 +54,9 @@ public abstract class MountTable extends BaseRecord { "Invalid entry, all mount points must start with / "; public static final String ERROR_MSG_NO_DEST_PATH_SPECIFIED = "Invalid entry, no destination paths specified "; - public static final String ERROR_MSG_INVAILD_DEST_NS = + public static final String ERROR_MSG_INVALID_DEST_NS = "Invalid entry, invalid destination nameservice "; - public static final String ERROR_MSG_INVAILD_DEST_PATH = + public static final String ERROR_MSG_INVALID_DEST_PATH = "Invalid entry, invalid destination path "; public static final String ERROR_MSG_ALL_DEST_MUST_START_WITH_BACK_SLASH = "Invalid entry, all destination must start with / "; @@ -394,11 +394,11 @@ public void validate() { String nsId = loc.getNameserviceId(); if (nsId == null || nsId.length() == 0) { throw new IllegalArgumentException( - ERROR_MSG_INVAILD_DEST_NS + this); + ERROR_MSG_INVALID_DEST_NS + this); } if (loc.getDest() == null || loc.getDest().length() == 0) { throw new IllegalArgumentException( - ERROR_MSG_INVAILD_DEST_PATH + this); + ERROR_MSG_INVALID_DEST_PATH + this); } if (!loc.getDest().startsWith("/")) { throw new IllegalArgumentException( diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java index 
339a9776ea452..2c12114ab9507 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java @@ -266,7 +266,7 @@ public void testValidation() throws IOException { fail("Mount table entry should be created failed."); } catch (Exception e) { GenericTestUtils.assertExceptionContains( - MountTable.ERROR_MSG_INVAILD_DEST_NS, e); + MountTable.ERROR_MSG_INVALID_DEST_NS, e); } destinations.clear(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index 8a4a230a22363..fb77e2d764ff2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -1201,7 +1201,7 @@ public void testAppsQueryAppTypes() throws JSONException, Exception { } @Test - public void testAppsQueryWithInvaildDeselects() + public void testAppsQueryWithInvalidDeselects() throws JSONException, Exception { try { rm.start(); From c8a6e2c9b69e66d318c663534571d7e38ad0bc02 Mon Sep 17 00:00:00 2001 From: litao <55134131+tomscut@users.noreply.github.com> Date: Wed, 3 Mar 2021 21:34:02 +0800 Subject: [PATCH 051/666] HDFS-15870. 
Remove unused configuration dfs.namenode.stripe.min (#2739) Co-authored-by: tomscut --- .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 -- .../test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java | 2 -- 2 files changed, 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 790d1aa19fdfc..c29d91c421af4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -270,8 +270,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { = "dfs.namenode.file.close.num-committed-allowed"; public static final int DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_DEFAULT = 0; - public static final String DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min"; - public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1; public static final String DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY = "dfs.namenode.safemode.replication.min"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java index da9473c8e1488..fab4fcd0409b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java @@ -83,8 +83,6 @@ public void initializeMemberVariables() { .add("dfs.datanode.non.local.lazy.persist"); configurationPropsToSkipCompare .add("dfs.namenode.tolerate.heartbeat.multiplier"); - configurationPropsToSkipCompare - .add("dfs.namenode.stripe.min"); configurationPropsToSkipCompare .add("dfs.namenode.replqueue.threshold-pct"); From ff622d043f1712f9595cb731a9e9d434c75e1b8b Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Wed, 3 Mar 2021 17:44:30 +0100 Subject: [PATCH 052/666] YARN-10655. Limit queue creation depth relative to its first static parent. Contributed by Andras Gyori. 
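A worked example of the new distance check, based on the scenario in the added test: root.a is a static queue. Creating root.a.a-auto.a1-auto finds root.a as the first existing queue, and since it is static the distance is just the two missing levels (a-auto plus the leaf a1-auto), so the creation succeeds. A later request for root.a.a-auto.a2-auto.a3-auto finds the dynamic a-auto as the first existing queue; the two missing levels give a distance of 2, and walking past the dynamic a-auto up to the static root.a raises it to 3, which is above the limit, so the request is rejected with a SchedulerDynamicEditException.
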
--- .../CapacitySchedulerAutoQueueHandler.java | 40 +++++++++++++------ ...CapacitySchedulerNewQueueAutoCreation.java | 20 ++++++++++ 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java index e847737388c0d..898b075d2e476 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java @@ -55,34 +55,43 @@ public LeafQueue autoCreateQueue(ApplicationPlacementContext queue) List parentsToCreate = new ArrayList<>(); ApplicationPlacementContext queueCandidateContext = parentContext; - CSQueue existingQueueCandidate = getQueue( + CSQueue firstExistingQueue = getQueue( queueCandidateContext.getFullQueuePath()); - while (existingQueueCandidate == null) { + while (firstExistingQueue == null) { parentsToCreate.add(queueCandidateContext); queueCandidateContext = CSQueueUtils.extractQueuePath( queueCandidateContext.getParentQueue()); - existingQueueCandidate = getQueue( + firstExistingQueue = getQueue( queueCandidateContext.getFullQueuePath()); } + CSQueue firstExistingStaticQueue = firstExistingQueue; + // Include the LeafQueue in the distance + int firstStaticParentDistance = parentsToCreate.size() + 1; + + while(isNonStaticParent(firstExistingStaticQueue)) { + queueCandidateContext = CSQueueUtils.extractQueuePath( + queueCandidateContext.getParentQueue()); + firstExistingStaticQueue = getQueue( + queueCandidateContext.getFullQueuePath()); + ++firstStaticParentDistance; + } + // Reverse the collection to to represent the hierarchy to be created // from highest to lowest level Collections.reverse(parentsToCreate); - if (!(existingQueueCandidate instanceof ParentQueue)) { + if (!(firstExistingQueue instanceof ParentQueue)) { throw new SchedulerDynamicEditException( "Could not auto create hierarchy of " + queue.getFullQueuePath() + ". Queue " - + existingQueueCandidate.getQueuePath() + + + firstExistingQueue.getQueuePath() + " is not a ParentQueue." ); } - ParentQueue existingParentQueue = (ParentQueue) existingQueueCandidate; + ParentQueue existingParentQueue = (ParentQueue) firstExistingQueue; int depthLimit = extractDepthLimit(existingParentQueue); - // The number of levels to be created including the LeafQueue - // (which is last) - int levelsToCreate = parentsToCreate.size() + 1; if (depthLimit == 0) { throw new SchedulerDynamicEditException("Auto creation of queue " + @@ -90,12 +99,12 @@ public LeafQueue autoCreateQueue(ApplicationPlacementContext queue) + existingParentQueue.getQueuePath()); } - if (levelsToCreate > depthLimit) { + if (firstStaticParentDistance > depthLimit) { throw new SchedulerDynamicEditException( "Could not auto create queue " + queue.getFullQueuePath() - + ". 
In order to create the desired queue hierarchy, " + - levelsToCreate + " levels of queues would need " + - "to be created, which is above the limit."); + + ". The distance of the LeafQueue from the first static " + + "ParentQueue is" + firstStaticParentDistance + ", which is " + + "above the limit."); } for (ApplicationPlacementContext current : parentsToCreate) { @@ -123,4 +132,9 @@ private int extractDepthLimit(ParentQueue parentQueue) { private CSQueue getQueue(String queue) { return queue != null ? queueManager.getQueue(queue) : null; } + + private boolean isNonStaticParent(CSQueue queue) { + return (!(queue instanceof AbstractCSQueue) + || ((AbstractCSQueue) queue).isDynamicQueue()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index 4facf943b6e45..2f83f1f050457 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -565,6 +565,26 @@ public void testAutoCreateQueueMaxQueuesLimit() throws Exception { } } + @Test + public void testAutoQueueCreationDepthLimitFromStaticParent() + throws Exception { + startScheduler(); + + // a is the first existing queue here and it is static, therefore + // the distance is 2 + createQueue("root.a.a-auto.a1-auto"); + Assert.assertNotNull(cs.getQueue("root.a.a-auto.a1-auto")); + + try { + createQueue("root.a.a-auto.a2-auto.a3-auto"); + Assert.fail("Queue creation should not succeed because the distance " + + "from the first static parent is above limit"); + } catch (SchedulerDynamicEditException ignored) { + + } + + } + private LeafQueue createQueue(String queuePath) throws YarnException { return autoQueueHandler.autoCreateQueue( CSQueueUtils.extractQueuePath(queuePath)); From d60015847df354407fa245a7d7d8d882de383c87 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 4 Mar 2021 14:55:37 +0900 Subject: [PATCH 053/666] HADOOP-17546. Update Description of hadoop-http-auth-signature-secret in HttpAuthentication.md. Contributed by Ravuri Sushma sree. --- .../hadoop-common/src/site/markdown/HttpAuthentication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md index ca5ce4898aa71..0c131ef3ea32b 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md @@ -43,7 +43,7 @@ The following properties should be in the `core-site.xml` of all the nodes in th | `hadoop.http.authentication.type` | `simple` | Defines authentication used for the HTTP web-consoles. The supported values are: `simple` \| `kerberos` \| `#AUTHENTICATION_HANDLER_CLASSNAME#`. 
| | `hadoop.http.authentication.token.validity` | `36000` | Indicates how long (in seconds) an authentication token is valid before it has to be renewed. | | `hadoop.http.authentication.token.max-inactive-interval` | `-1` (disabled) | Specifies the time, in seconds, between client requests the server will invalidate the token. | -| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, ResourceManager, NameNode, DataNode and NodeManager. This file should be readable only by the Unix user running the daemons. | +| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. A different secret should be used for each service in the cluster, ResourceManager, NameNode, DataNode and NodeManager. This file should be readable only by the Unix user running the daemons. | | `hadoop.http.authentication.cookie.domain` | | The domain to use for the HTTP cookie that stores the authentication token. For authentication to work correctly across all nodes in the cluster the domain must be correctly set. There is no default value, the HTTP cookie will not have a domain working only with the hostname issuing the HTTP cookie. | | `hadoop.http.authentication.cookie.persistent` | `false` (session cookie) | Specifies the persistence of the HTTP cookie. If the value is true, the cookie is a persistent one. Otherwise, it is a session cookie. *IMPORTANT*: when using IP addresses, browsers ignore cookies with domain settings. For this setting to work properly all nodes in the cluster must be configured to generate URLs with `hostname.domain` names on it. | | `hadoop.http.authentication.simple.anonymous.allowed` | `true` | Indicates whether anonymous requests are allowed when using 'simple' authentication. | From e59d4f5811c3d9c8b9e97a1519bbc4ce48d0be0a Mon Sep 17 00:00:00 2001 From: Neil Date: Thu, 4 Mar 2021 17:22:58 +0800 Subject: [PATCH 054/666] YARN-10649. Fix RMNodeImpl.updateExistContainers leak (#2719). Contributed by Max Xie --- .../yarn/server/resourcemanager/rmnode/RMNodeImpl.java | 7 +++++++ .../server/resourcemanager/TestRMNodeTransitions.java | 10 ++++++++++ 2 files changed, 17 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index cec9915e0d1e0..6070d02217ce1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -1464,6 +1464,11 @@ public int getQueueSize() { return nodeUpdateQueue.size(); } + // For test only. + @VisibleForTesting + public Map getUpdatedExistContainers() { + return this.updatedExistContainers; + } // For test only. 
@VisibleForTesting public Set getLaunchedContainers() { @@ -1582,6 +1587,7 @@ private void handleContainerStatus(List containerStatuses) { } else { // A finished container launchedContainers.remove(containerId); + updatedExistContainers.remove(containerId); if (completedContainers.add(containerId)) { newlyCompletedContainers.add(remoteContainer); } @@ -1595,6 +1601,7 @@ private void handleContainerStatus(List containerStatuses) { findLostContainers(numRemoteRunningContainers, containerStatuses); for (ContainerStatus remoteContainer : lostContainers) { ContainerId containerId = remoteContainer.getContainerId(); + updatedExistContainers.remove(containerId); if (completedContainers.add(containerId)) { newlyCompletedContainers.add(remoteContainer); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index b21bf394ea1a1..dad27839cf6c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -1096,6 +1096,12 @@ public void testDisappearingContainer() { node.getLaunchedContainers().contains(cid1)); Assert.assertTrue("second container not running", node.getLaunchedContainers().contains(cid2)); + assertEquals("unexpected number of running containers", + 2, node.getUpdatedExistContainers().size()); + Assert.assertTrue("first container not running", + node.getUpdatedExistContainers().containsKey(cid1)); + Assert.assertTrue("second container not running", + node.getUpdatedExistContainers().containsKey(cid2)); assertEquals("already completed containers", 0, completedContainers.size()); containerStats.remove(0); @@ -1115,6 +1121,10 @@ public void testDisappearingContainer() { 1, node.getLaunchedContainers().size()); Assert.assertTrue("second container not running", node.getLaunchedContainers().contains(cid2)); + assertEquals("unexpected number of running containers", + 1, node.getUpdatedExistContainers().size()); + Assert.assertTrue("second container not running", + node.getUpdatedExistContainers().containsKey(cid2)); } @Test From b8ff1ef39171c355a5fb1540b4db36b2c6a1ff13 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 4 Mar 2021 12:23:11 +0100 Subject: [PATCH 055/666] YARN-10623. Capacity scheduler should support refresh queue automatically by a thread policy. Contributed by Qi Zhu. 
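A minimal sketch of how the new policy could be enabled; the interval key and its 5000 ms default come from this patch, while wiring the policy in through the scheduling-monitor properties and placing the interval in capacity-scheduler.xml are assumptions:

<!-- yarn-site.xml: run the policy from the RM scheduling monitor -->
<property>
  <name>yarn.resourcemanager.scheduler.monitor.enable</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.monitor.policies</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueConfigurationAutoRefreshPolicy</value>
</property>

<!-- capacity-scheduler.xml (assumed location): how often the policy re-checks
     the capacity scheduler configuration for changes, in milliseconds -->
<property>
  <name>yarn.scheduler.capacity.queue.auto.refresh.monitoring-interval</name>
  <value>5000</value>
</property>
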
--- .../FileSystemBasedConfigurationProvider.java | 8 + .../scheduler/capacity/CapacityScheduler.java | 4 + .../CapacitySchedulerConfiguration.java | 12 + .../QueueConfigurationAutoRefreshPolicy.java | 196 +++++++++++ ...stQueueConfigurationAutoRefreshPolicy.java | 308 ++++++++++++++++++ 5 files changed, 528 insertions(+) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueConfigurationAutoRefreshPolicy.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java index 3532d13f4a917..156468e4f4825 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java @@ -92,4 +92,12 @@ public synchronized void initInternal(Configuration bootstrapConf) public synchronized void closeInternal() throws Exception { fs.close(); } + + public FileSystem getFs() { + return fs; + } + + public Path getConfigDir() { + return configDir; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 36f831dca9459..ee91b0c3825a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -3396,6 +3396,10 @@ public MutableConfigurationProvider getMutableConfProvider() { return null; } + public CSConfigurationProvider getCsConfProvider() { + return csConfProvider; + } + @Override public void resetSchedulerMetrics() { CapacitySchedulerMetrics.destroy(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 8e605964e4819..b66ab85733eda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -2200,6 +2200,18 @@ public void setAutoCreatedQueuesV2MaxChildQueuesLimit(String queuePath, public static final long DEFAULT_QUEUE_MANAGEMENT_MONITORING_INTERVAL = 1500L; + /** + * Time in milliseconds between invocations + * of QueueConfigurationAutoRefreshPolicy. + */ + @Private + public static final String QUEUE_AUTO_REFRESH_MONITORING_INTERVAL = + PREFIX + "queue.auto.refresh.monitoring-interval"; + + @Private + public static final long DEFAULT_QUEUE_AUTO_REFRESH_MONITORING_INTERVAL = + 5000L; + /** * Queue Management computation policy for Auto Created queues * @param queue The queue's path diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java new file mode 100644 index 0000000000000..0ae0777e80155 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java @@ -0,0 +1,196 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.util.MonotonicClock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; + +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; + +import java.io.IOException; + + +/** + * Queue auto refresh policy for queues. 
+ */ +public class QueueConfigurationAutoRefreshPolicy + implements SchedulingEditPolicy { + + private static final Logger LOG = + LoggerFactory.getLogger(QueueConfigurationAutoRefreshPolicy.class); + + private Clock clock; + + // Pointer to other RM components + private RMContext rmContext; + private ResourceCalculator rc; + private CapacityScheduler scheduler; + private RMNodeLabelsManager nlm; + + private long monitoringInterval; + private long lastModified; + + // Last time we attempt to reload queues + // included successful and failed case. + private long lastReloadAttempt; + private boolean lastReloadAttemptFailed = false; + + // Path to XML file containing allocations. + private Path allocCsFile; + private FileSystem fs; + + /** + * Instantiated by CapacitySchedulerConfiguration. + */ + public QueueConfigurationAutoRefreshPolicy() { + clock = new MonotonicClock(); + } + + @Override + public void init(final Configuration config, final RMContext context, + final ResourceScheduler sched) { + LOG.info("Queue auto refresh Policy monitor: {}" + this. + getClass().getCanonicalName()); + assert null == scheduler : "Unexpected duplicate call to init"; + if (!(sched instanceof CapacityScheduler)) { + throw new YarnRuntimeException("Class " + + sched.getClass().getCanonicalName() + " not instance of " + + CapacityScheduler.class.getCanonicalName()); + } + rmContext = context; + scheduler = (CapacityScheduler) sched; + clock = scheduler.getClock(); + + rc = scheduler.getResourceCalculator(); + nlm = scheduler.getRMContext().getNodeLabelManager(); + + CapacitySchedulerConfiguration csConfig = scheduler.getConfiguration(); + + monitoringInterval = csConfig.getLong( + CapacitySchedulerConfiguration.QUEUE_AUTO_REFRESH_MONITORING_INTERVAL, + CapacitySchedulerConfiguration. + DEFAULT_QUEUE_AUTO_REFRESH_MONITORING_INTERVAL); + } + + + @Override + public void editSchedule() { + long startTs = clock.getTime(); + + try { + + // Support both FileSystemBased and LocalFile based + if (rmContext.getYarnConfiguration(). + get(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS). + equals(FileSystemBasedConfigurationProvider + .class.getCanonicalName())) { + allocCsFile = new Path(rmContext.getYarnConfiguration(). + get(YarnConfiguration.FS_BASED_RM_CONF_STORE), + YarnConfiguration.CS_CONFIGURATION_FILE); + } else { + allocCsFile = new Path(rmContext.getYarnConfiguration() + .getClassLoader().getResource("").toString(), + YarnConfiguration.CS_CONFIGURATION_FILE); + } + + // Check if the cs related conf modified + fs = allocCsFile.getFileSystem(rmContext.getYarnConfiguration()); + + lastModified = + fs.getFileStatus(allocCsFile).getModificationTime(); + + long time = clock.getTime(); + + if (lastModified > lastReloadAttempt && + time > lastReloadAttempt + monitoringInterval) { + try { + rmContext.getRMAdminService().refreshQueues(); + LOG.info("Queue auto refresh completed successfully"); + lastReloadAttempt = clock.getTime(); + } catch (IOException | YarnException e) { + LOG.error("Can't refresh queue: " + e); + if (!lastReloadAttemptFailed) { + LOG.error("Failed to reload capacity scheduler config file - " + + "will use existing conf.", e.getMessage()); + } + lastReloadAttempt = clock.getTime(); + lastReloadAttemptFailed = true; + } + + } else if (lastModified == 0L) { + if (!lastReloadAttemptFailed) { + LOG.warn("Failed to reload capacity scheduler config file because" + + " last modified returned 0. 
File exists: " + + fs.exists(allocCsFile)); + } + lastReloadAttemptFailed = true; + } + + } catch (IOException e) { + LOG.error("Can't get file status for refresh : " + e); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms."); + } + } + + @VisibleForTesting + long getLastReloadAttempt() { + return lastReloadAttempt; + } + + @VisibleForTesting + long getLastModified() { + return lastModified; + } + + @VisibleForTesting + Clock getClock() { + return clock; + } + + @VisibleForTesting + boolean getLastReloadAttemptFailed() { + return lastReloadAttemptFailed; + } + + @Override + public long getMonitoringInterval() { + return monitoringInterval; + } + + @Override + public String getPolicyName() { + return QueueConfigurationAutoRefreshPolicy.class.getCanonicalName(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueConfigurationAutoRefreshPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueConfigurationAutoRefreshPolicy.java new file mode 100644 index 0000000000000..f4a5a2103d648 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueConfigurationAutoRefreshPolicy.java @@ -0,0 +1,308 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider; +import org.apache.hadoop.yarn.LocalConfigurationProvider; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; + +public class TestQueueConfigurationAutoRefreshPolicy { + + private Configuration configuration; + private MockRM rm = null; + private FileSystem fs; + private Path workingPath; + private Path workingPathRecover; + private Path fileSystemWorkingPath; + private Path tmpDir; + private QueueConfigurationAutoRefreshPolicy policy; + + static { + YarnConfiguration.addDefaultResource( + YarnConfiguration.CS_CONFIGURATION_FILE); + YarnConfiguration.addDefaultResource( + YarnConfiguration.DR_CONFIGURATION_FILE); + } + + @Before + public void setup() throws IOException { + QueueMetrics.clearQueueMetrics(); + DefaultMetricsSystem.setMiniClusterMode(true); + + configuration = new YarnConfiguration(); + configuration.set(YarnConfiguration.RM_SCHEDULER, + CapacityScheduler.class.getCanonicalName()); + fs = FileSystem.get(configuration); + workingPath = new Path(QueueConfigurationAutoRefreshPolicy. + class.getClassLoader(). + getResource(".").toString()); + workingPathRecover = new Path(QueueConfigurationAutoRefreshPolicy. + class.getClassLoader(). 
+ getResource(".").toString() + "/" + "Recover"); + fileSystemWorkingPath = + new Path(new File("target", this.getClass().getSimpleName() + + "-remoteDir").getAbsolutePath()); + + tmpDir = new Path(new File("target", this.getClass().getSimpleName() + + "-tmpDir").getAbsolutePath()); + fs.delete(fileSystemWorkingPath, true); + fs.mkdirs(fileSystemWorkingPath); + fs.delete(tmpDir, true); + fs.mkdirs(tmpDir); + + policy = + new QueueConfigurationAutoRefreshPolicy(); + } + + private String writeConfigurationXML(Configuration conf, String confXMLName) + throws IOException { + DataOutputStream output = null; + try { + final File confFile = new File(tmpDir.toString(), confXMLName); + if (confFile.exists()) { + confFile.delete(); + } + if (!confFile.createNewFile()) { + Assert.fail("Can not create " + confXMLName); + } + output = new DataOutputStream( + new FileOutputStream(confFile)); + conf.writeXml(output); + return confFile.getAbsolutePath(); + } finally { + if (output != null) { + output.close(); + } + } + } + + private void uploadConfiguration(Boolean isFileSystemBased, + Configuration conf, String confFileName) + throws IOException { + String csConfFile = writeConfigurationXML(conf, confFileName); + if (isFileSystemBased) { + // upload the file into Remote File System + uploadToRemoteFileSystem(new Path(csConfFile), + fileSystemWorkingPath); + } else { + // upload the file into Work Path for Local File + uploadToRemoteFileSystem(new Path(csConfFile), + workingPath); + } + } + + private void uploadToRemoteFileSystem(Path filePath, Path remotePath) + throws IOException { + fs.copyFromLocalFile(filePath, remotePath); + } + + private void uploadDefaultConfiguration(Boolean + isFileSystemBased) throws IOException { + Configuration conf = new Configuration(); + uploadConfiguration(isFileSystemBased, + conf, "core-site.xml"); + + YarnConfiguration yarnConf = new YarnConfiguration(); + + uploadConfiguration(isFileSystemBased, + yarnConf, "yarn-site.xml"); + + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + uploadConfiguration(isFileSystemBased, + csConf, "capacity-scheduler.xml"); + + Configuration hadoopPolicyConf = new Configuration(false); + hadoopPolicyConf + .addResource(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE); + uploadConfiguration(isFileSystemBased, + hadoopPolicyConf, "hadoop-policy.xml"); + } + + @Test + public void testFileSystemBasedEditSchedule() throws Exception { + // Test FileSystemBasedConfigurationProvider scheduled + testCommon(true); + } + + @Test + public void testLocalFileBasedEditSchedule() throws Exception { + // Prepare for recover for local file default. + fs.mkdirs(workingPath); + fs.copyFromLocalFile(new Path(workingPath.toString() + + "/" + YarnConfiguration.CORE_SITE_CONFIGURATION_FILE), + new Path(workingPathRecover.toString() + + "/" + YarnConfiguration.CORE_SITE_CONFIGURATION_FILE)); + + fs.copyFromLocalFile(new Path(workingPath.toString() + + "/" + YarnConfiguration.YARN_SITE_CONFIGURATION_FILE), + new Path(workingPathRecover.toString() + + "/" + YarnConfiguration.YARN_SITE_CONFIGURATION_FILE)); + + fs.copyFromLocalFile(new Path(workingPath.toString() + + "/" + YarnConfiguration.CS_CONFIGURATION_FILE), + new Path(workingPathRecover.toString() + + "/" + YarnConfiguration.CS_CONFIGURATION_FILE)); + + // Test LocalConfigurationProvider scheduled + testCommon(false); + + // Recover for recover for local file default. 
+ fs.copyFromLocalFile(new Path(workingPathRecover.toString() + + "/" + YarnConfiguration.CORE_SITE_CONFIGURATION_FILE), + new Path(workingPath.toString() + + "/" + YarnConfiguration.CORE_SITE_CONFIGURATION_FILE)); + + fs.copyFromLocalFile(new Path(workingPathRecover.toString() + + "/" + YarnConfiguration.YARN_SITE_CONFIGURATION_FILE), + new Path(workingPath.toString() + + "/" + YarnConfiguration.YARN_SITE_CONFIGURATION_FILE)); + + fs.copyFromLocalFile(new Path(workingPathRecover.toString() + + "/" + YarnConfiguration.CS_CONFIGURATION_FILE), + new Path(workingPath.toString() + + "/" + YarnConfiguration.CS_CONFIGURATION_FILE)); + + fs.delete(workingPathRecover, true); + } + + public void testCommon(Boolean isFileSystemBased) throws Exception { + + // Set auto refresh interval to 1s + configuration.setLong(CapacitySchedulerConfiguration. + QUEUE_AUTO_REFRESH_MONITORING_INTERVAL, + 1000L); + + if (isFileSystemBased) { + configuration.set(YarnConfiguration.FS_BASED_RM_CONF_STORE, + fileSystemWorkingPath.toString()); + } + + //upload default configurations + uploadDefaultConfiguration(isFileSystemBased); + + if (isFileSystemBased) { + configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS, + FileSystemBasedConfigurationProvider.class.getCanonicalName()); + } else { + configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS, + LocalConfigurationProvider.class.getCanonicalName()); + } + + // upload the auto refresh related configurations + uploadConfiguration(isFileSystemBased, + configuration, "yarn-site.xml"); + uploadConfiguration(isFileSystemBased, + configuration, "capacity-scheduler.xml"); + + rm = new MockRM(configuration); + rm.init(configuration); + policy.init(configuration, + rm.getRMContext(), + rm.getResourceScheduler()); + rm.start(); + + CapacityScheduler cs = + (CapacityScheduler) rm.getRMContext().getScheduler(); + + int maxAppsBefore = cs.getConfiguration().getMaximumSystemApplications(); + + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + csConf.setInt(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, + 5000); + uploadConfiguration(isFileSystemBased, + csConf, "capacity-scheduler.xml"); + + // Refreshed first time. + policy.editSchedule(); + + // Make sure refresh successfully. + Assert.assertFalse(policy.getLastReloadAttemptFailed()); + long oldModified = policy.getLastModified(); + long oldSuccess = policy.getLastReloadAttempt(); + + Assert.assertTrue(oldSuccess > oldModified); + + int maxAppsAfter = cs.getConfiguration().getMaximumSystemApplications(); + Assert.assertEquals(maxAppsAfter, 5000); + Assert.assertTrue(maxAppsAfter != maxAppsBefore); + + // Trigger interval for refresh. + GenericTestUtils.waitFor(() -> (policy.getClock().getTime() - + policy.getLastReloadAttempt()) / 1000 > 1, + 500, 3000); + + // Upload for modified. + csConf.setInt(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, + 3000); + uploadConfiguration(isFileSystemBased, + csConf, "capacity-scheduler.xml"); + + policy.editSchedule(); + // Wait for triggered refresh. + GenericTestUtils.waitFor(() -> policy.getLastReloadAttempt() > + policy.getLastModified(), + 500, 3000); + + // Make sure refresh successfully. + Assert.assertFalse(policy.getLastReloadAttemptFailed()); + oldModified = policy.getLastModified(); + oldSuccess = policy.getLastReloadAttempt(); + Assert.assertTrue(oldSuccess > oldModified); + Assert.assertEquals(cs.getConfiguration(). + getMaximumSystemApplications(), 3000); + + // Trigger interval for refresh. 
+ GenericTestUtils.waitFor(() -> (policy.getClock().getTime() - + policy.getLastReloadAttempt()) / 1000 > 1, + 500, 3000); + + // Without modified + policy.editSchedule(); + Assert.assertEquals(oldModified, + policy.getLastModified()); + Assert.assertEquals(oldSuccess, + policy.getLastReloadAttempt()); + } + + @After + public void tearDown() throws IOException { + if (rm != null) { + rm.stop(); + } + fs.delete(fileSystemWorkingPath, true); + fs.delete(tmpDir, true); + } +} From b3c1c38d61ca5cb1ab3e09fd4e63bdb34a23acfd Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 4 Mar 2021 17:18:35 +0100 Subject: [PATCH 056/666] YARN-10532. Capacity Scheduler Auto Queue Creation: Allow auto delete queue when queue is not being used. Contributed by Qi Zhu. --- .../scheduler/capacity/AbstractCSQueue.java | 47 +++ .../AutoCreatedQueueDeletionPolicy.java | 163 ++++++++++ .../scheduler/capacity/CapacityScheduler.java | 64 +++- .../CapacitySchedulerConfiguration.java | 56 ++++ .../scheduler/capacity/LeafQueue.java | 10 + .../scheduler/capacity/ParentQueue.java | 34 +- .../event/AutoCreatedQueueDeletionEvent.java | 32 ++ .../scheduler/event/SchedulerEventType.java | 5 +- .../monitor/TestSchedulingMonitor.java | 43 +++ .../TestAutoCreatedQueueDeletionPolicy.java | 184 +++++++++++ ...CapacitySchedulerNewQueueAutoCreation.java | 303 +++++++++++++++++- 11 files changed, 931 insertions(+), 10 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueDeletionPolicy.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AutoCreatedQueueDeletionEvent.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueDeletionPolicy.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 28e2d54c49426..e5380fa952889 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -29,6 +29,7 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -154,6 +155,10 @@ public enum CapacityConfigType { // is it a dynamic queue? private boolean dynamicQueue = false; + // The timestamp of the last submitted application to this queue. + // Only applies to dynamic queues. 
+ private long lastSubmittedTimestamp; + public AbstractCSQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { this(cs, cs.getConfiguration(), queueName, parent, old); @@ -1642,4 +1647,46 @@ protected String getCapacityOrWeightString() { return "capacity=" + queueCapacities.getCapacity(); } } + + public boolean isEligibleForAutoDeletion() { + return false; + } + + public boolean isInactiveDynamicQueue() { + long idleDurationSeconds = + (Time.monotonicNow() - getLastSubmittedTimestamp())/1000; + return isDynamicQueue() && isEligibleForAutoDeletion() && + (idleDurationSeconds > this.csContext.getConfiguration(). + getAutoExpiredDeletionTime()); + } + + public void updateLastSubmittedTimeStamp() { + writeLock.lock(); + try { + this.lastSubmittedTimestamp = Time.monotonicNow(); + } finally { + writeLock.unlock(); + } + } + + public long getLastSubmittedTimestamp() { + readLock.lock(); + + try { + return lastSubmittedTimestamp; + } finally { + readLock.unlock(); + } + } + + @VisibleForTesting + public void setLastSubmittedTimestamp(long lastSubmittedTimestamp) { + writeLock.lock(); + try { + this.lastSubmittedTimestamp = lastSubmittedTimestamp; + } finally { + writeLock.unlock(); + } + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueDeletionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueDeletionPolicy.java new file mode 100644 index 0000000000000..4b47bb471648f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueDeletionPolicy.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AutoCreatedQueueDeletionEvent; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * Auto deletion policy for auto created queue V2. + * Just for weight based auto created queues. + */ +public class AutoCreatedQueueDeletionPolicy implements SchedulingEditPolicy { + private static final Logger LOG = + LoggerFactory.getLogger(AutoCreatedQueueDeletionPolicy.class); + + private Clock clock; + + // Pointer to other RM components + private RMContext rmContext; + private ResourceCalculator rc; + private CapacityScheduler scheduler; + + private long monitoringInterval; + + // markedForDeletion: in each interval, + // this set is extended by queues that are eligible for auto deletion. + private Set markedForDeletion = new HashSet<>(); + // sentForDeletion: if in the next interval, + // there is queue, that is eligible for auto deletion, + // and is already marked for deletion, move it to this queue. + private Set sentForDeletion = new HashSet<>(); + + @Override + public void init(final Configuration config, final RMContext context, + final ResourceScheduler sched) { + LOG.info("Auto Deletion Policy monitor: {}" + this. + getClass().getCanonicalName()); + if (!(sched instanceof CapacityScheduler)) { + throw new YarnRuntimeException("Class " + + sched.getClass().getCanonicalName() + " not instance of " + + CapacityScheduler.class.getCanonicalName()); + } + rmContext = context; + scheduler = (CapacityScheduler) sched; + clock = scheduler.getClock(); + + rc = scheduler.getResourceCalculator(); + + CapacitySchedulerConfiguration csConfig = scheduler.getConfiguration(); + + // The monitor time will equal the + // auto deletion expired time default. + monitoringInterval = + csConfig.getLong(CapacitySchedulerConfiguration. + AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, + CapacitySchedulerConfiguration. 
+ DEFAULT_AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME) * 1000; + + prepareForAutoDeletion(); + } + + public void prepareForAutoDeletion() { + Set newMarks = new HashSet<>(); + for (Map.Entry queueEntry : + scheduler.getCapacitySchedulerQueueManager().getQueues().entrySet()) { + String queuePath = queueEntry.getKey(); + CSQueue queue = queueEntry.getValue(); + if (queue instanceof AbstractCSQueue && + ((AbstractCSQueue) queue).isEligibleForAutoDeletion()) { + if (markedForDeletion.contains(queuePath)) { + sentForDeletion.add(queuePath); + markedForDeletion.remove(queuePath); + } else { + newMarks.add(queuePath); + } + } + } + markedForDeletion.clear(); + markedForDeletion.addAll(newMarks); + } + + @Override + public void editSchedule() { + long startTs = clock.getTime(); + + prepareForAutoDeletion(); + triggerAutoDeletionForExpiredQueues(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms."); + } + } + + public void triggerAutoDeletionForExpiredQueues() { + // Proceed new auto created queues + for (String queueName : sentForDeletion) { + CSQueue checkQueue = + scheduler.getCapacitySchedulerQueueManager(). + getQueue(queueName); + deleteAutoCreatedQueue(checkQueue); + } + sentForDeletion.clear(); + } + + private void deleteAutoCreatedQueue(CSQueue queue) { + if (queue != null) { + AutoCreatedQueueDeletionEvent autoCreatedQueueDeletionEvent = + new AutoCreatedQueueDeletionEvent(queue); + LOG.info("Queue:" + queue.getQueuePath() + + " will trigger deletion event to CS."); + scheduler.getRMContext().getDispatcher().getEventHandler().handle( + autoCreatedQueueDeletionEvent); + } + } + + @Override + public long getMonitoringInterval() { + return monitoringInterval; + } + + @Override + public String getPolicyName() { + return AutoCreatedQueueDeletionPolicy.class.getCanonicalName(); + } + + @VisibleForTesting + public Set getMarkedForDeletion() { + return markedForDeletion; + } + + @VisibleForTesting + public Set getSentForDeletion() { + return sentForDeletion; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index ee91b0c3825a5..467dacbfbb472 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -143,9 +143,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; - import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event .QueueManagementChangeEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AutoCreatedQueueDeletionEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; 
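The deletion policy defined just above works in two passes: a queue that looks idle is only marked on the first scan, and is sent for deletion only if it is still idle on the next scan, so a queue that becomes busy again in between simply drops off the list. A compact sketch of that two-phase mark-and-sweep, with a hypothetical isIdle predicate standing in for isEligibleForAutoDeletion():

import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

// Generic two-interval mark-and-sweep: an item must be idle on two consecutive
// scans before it is swept, which protects items that were idle only briefly.
class TwoPhaseReaper<T> {
  private final Set<T> marked = new HashSet<>();

  // Returns the items that should actually be deleted on this scan.
  Set<T> scan(Set<T> candidates, Predicate<T> isIdle) {
    Set<T> toDelete = new HashSet<>();
    Set<T> newMarks = new HashSet<>();
    for (T item : candidates) {
      if (isIdle.test(item)) {
        if (marked.contains(item)) {
          toDelete.add(item);   // idle twice in a row: sweep now
        } else {
          newMarks.add(item);   // idle once: remember and re-check next scan
        }
      }
    }
    // Items that became busy again are neither marked nor swept.
    marked.clear();
    marked.addAll(newMarks);
    return toDelete;
  }
}

In the policy above, markedForDeletion plays the role of marked, and sentForDeletion holds the result of a scan until the deletion events are dispatched to the scheduler.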
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; @@ -2106,11 +2106,35 @@ public void handle(SchedulerEvent event) { } } break; + case AUTO_QUEUE_DELETION: + try { + AutoCreatedQueueDeletionEvent autoCreatedQueueDeletionEvent = + (AutoCreatedQueueDeletionEvent) event; + removeAutoCreatedQueue(autoCreatedQueueDeletionEvent. + getCheckQueue()); + } catch (SchedulerDynamicEditException sde) { + LOG.error("Dynamic queue deletion cannot be applied for " + + "queue : ", sde); + } + break; default: LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!"); } } + private void removeAutoCreatedQueue(CSQueue checkQueue) + throws SchedulerDynamicEditException{ + writeLock.lock(); + try { + if (checkQueue instanceof AbstractCSQueue + && ((AbstractCSQueue) checkQueue).isInactiveDynamicQueue()) { + removeQueue(checkQueue); + } + } finally { + writeLock.unlock(); + } + } + private void updateNodeAttributes( NodeAttributesUpdateSchedulerEvent attributeUpdateEvent) { writeLock.lock(); @@ -2564,6 +2588,44 @@ public void removeQueue(String queueName) } } + public void removeQueue(CSQueue queue) + throws SchedulerDynamicEditException { + writeLock.lock(); + try { + LOG.info("Removing queue: " + queue.getQueuePath()); + if (!((AbstractCSQueue)queue).isDynamicQueue()) { + throw new SchedulerDynamicEditException( + "The queue that we are asked " + + "to remove (" + queue.getQueuePath() + + ") is not a DynamicQueue"); + } + + if (!((AbstractCSQueue) queue).isEligibleForAutoDeletion()) { + LOG.warn("Queue " + queue.getQueuePath() + + " is marked for deletion, but not eligible for deletion"); + return; + } + + ParentQueue parentQueue = (ParentQueue)queue.getParent(); + if (parentQueue != null) { + ((ParentQueue) queue.getParent()).removeChildQueue(queue); + } else { + throw new SchedulerDynamicEditException( + "The queue " + queue.getQueuePath() + + " can't be removed because it's parent is null"); + } + + if (parentQueue.childQueues.contains(queue) || + queueManager.getQueue(queue.getQueuePath()) != null) { + throw new SchedulerDynamicEditException( + "The queue " + queue.getQueuePath() + + " has not been removed normally."); + } + } finally { + writeLock.unlock(); + } + } + @Override public void addQueue(Queue queue) throws SchedulerDynamicEditException, IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index b66ab85733eda..90979dc94dd99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -2200,6 +2200,62 @@ public void setAutoCreatedQueuesV2MaxChildQueuesLimit(String queuePath, public static final long DEFAULT_QUEUE_MANAGEMENT_MONITORING_INTERVAL = 1500L; + @Private + public static final boolean + DEFAULT_AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE = true; + + @Private + public static final String 
AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE = + AUTO_QUEUE_CREATION_V2_PREFIX + "queue-auto-removal.enable"; + + // 300s for expired default + @Private + public static final long + DEFAULT_AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME = 300; + + @Private + public static final String AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME = + PREFIX + AUTO_QUEUE_CREATION_V2_PREFIX + "queue-expiration-time"; + + /** + * If true, auto created queue with weight mode + * will be deleted when queue is expired. + * @param queuePath the queue's path for auto deletion check + * @return true if auto created queue's deletion when expired is enabled + * else false. Default + * is true. + */ + @Private + public boolean isAutoExpiredDeletionEnabled(String queuePath) { + boolean isAutoExpiredDeletionEnabled = getBoolean( + getQueuePrefix(queuePath) + + AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE, + DEFAULT_AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE); + return isAutoExpiredDeletionEnabled; + } + + @Private + @VisibleForTesting + public void setAutoExpiredDeletionEnabled(String queuePath, + boolean autoRemovalEnable) { + setBoolean(getQueuePrefix(queuePath) + + AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE, + autoRemovalEnable); + } + + @Private + @VisibleForTesting + public void setAutoExpiredDeletionTime(long time) { + setLong(AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, time); + } + + @Private + @VisibleForTesting + public long getAutoExpiredDeletionTime() { + return getLong(AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, + DEFAULT_AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME); + } + /** * Time in milliseconds between invocations * of QueueConfigurationAutoRefreshPolicy. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 104a89caee0c0..71e65cb4367c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -614,6 +614,9 @@ public void submitApplication(ApplicationId applicationId, String userName, // Careful! Locking order is important! validateSubmitApplication(applicationId, userName, queue); + // Signal for expired auto deletion. + updateLastSubmittedTimeStamp(); + // Inform the parent queue try { getParent().submitApplication(applicationId, userName, queue); @@ -2402,4 +2405,11 @@ List getCopyOfNonRunnableAppSchedulables() { } return appsToReturn; } + + @Override + public boolean isEligibleForAutoDeletion() { + return isDynamicQueue() && getNumApplications() == 0 + && csContext.getConfiguration(). 
+ isAutoExpiredDeletionEnabled(this.getQueuePath()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index ce5e49040e678..3d28933141359 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -570,9 +570,10 @@ private CSQueue addDynamicChildQueue(String childQueuePath, boolean isLeaf) CSQueue newQueue = createNewQueue(childQueuePath, isLeaf); this.childQueues.add(newQueue); + updateLastSubmittedTimeStamp(); - // Call updateClusterResource - // , which will deal with all effectiveMin/MaxResource + // Call updateClusterResource. + // Which will deal with all effectiveMin/MaxResource // Calculation this.updateClusterResource(csContext.getClusterResource(), new ResourceLimits(this.csContext.getClusterResource())); @@ -583,6 +584,28 @@ private CSQueue addDynamicChildQueue(String childQueuePath, boolean isLeaf) } } + + // New method to remove child queue + public void removeChildQueue(CSQueue queue) + throws SchedulerDynamicEditException { + writeLock.lock(); + try { + // Now we can do remove and update + this.childQueues.remove(queue); + this.scheduler.getCapacitySchedulerQueueManager() + .removeQueue(queue.getQueuePath()); + + // Call updateClusterResource, + // which will deal with all effectiveMin/MaxResource + // Calculation + this.updateClusterResource(csContext.getClusterResource(), + new ResourceLimits(this.csContext.getClusterResource())); + + } finally { + writeLock.unlock(); + } + } + /** * Check whether this queue supports adding additional child queues * dynamically. @@ -1607,4 +1630,11 @@ void decrementRunnableApps() { Map getEffectiveMinRatioPerResource() { return effectiveMinRatioPerResource; } + + @Override + public boolean isEligibleForAutoDeletion() { + return isDynamicQueue() && getChildQueues().size() == 0 && + csContext.getConfiguration(). + isAutoExpiredDeletionEnabled(this.getQueuePath()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AutoCreatedQueueDeletionEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AutoCreatedQueueDeletionEvent.java new file mode 100644 index 0000000000000..68b86dda408a5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AutoCreatedQueueDeletionEvent.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; + +public class AutoCreatedQueueDeletionEvent extends SchedulerEvent{ + private CSQueue checkQueue; + public AutoCreatedQueueDeletionEvent(CSQueue checkQueue) { + super(SchedulerEventType.AUTO_QUEUE_DELETION); + this.checkQueue = checkQueue; + } + + public CSQueue getCheckQueue() { + return checkQueue; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java index 869bf0ed9e457..3b8a1de64e2d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java @@ -55,5 +55,8 @@ public enum SchedulerEventType { MARK_CONTAINER_FOR_NONKILLABLE, //Queue Management Change - MANAGE_QUEUE + MANAGE_QUEUE, + + // Auto created queue, auto deletion check + AUTO_QUEUE_DELETION } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/TestSchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/TestSchedulingMonitor.java index 84126c7287793..f04081e48600d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/TestSchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/TestSchedulingMonitor.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedQueueDeletionPolicy; import org.junit.Test; import java.util.HashSet; @@ -91,5 +92,47 @@ public void testRMUpdateSchedulingEditPolicy() throws Exception { YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS); cs.reinitialize(conf, rm.getRMContext()); assertTrue(smm.isRSMEmpty()); + rm.close(); + 
} + + @Test(timeout = 10000) + public void testRMUpdateAutoCreatedQueueDeletionPolicy() throws Exception { + CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true); + conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + AutoCreatedQueueDeletionPolicy.class.getCanonicalName()); + MockRM rm = new MockRM(conf); + rm.start(); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + SchedulingMonitorManager smm = cs.getSchedulingMonitorManager(); + + // runningSchedulingMonitors should not be empty when initialize RM + // scheduler monitor + cs.reinitialize(conf, rm.getRMContext()); + assertFalse(smm.isRSMEmpty()); + + // make sure runningSchedulingPolicies contains all the configured policy + // in YARNConfiguration + String[] configuredPolicies = conf.getStrings( + YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES); + Set configurePoliciesSet = new HashSet<>(); + for (String s : configuredPolicies) { + configurePoliciesSet.add(s); + } + assertTrue(smm.isSameConfiguredPolicies(configurePoliciesSet)); + + // make sure the running monitor contains AutoCreatedQueueDeletionPolicy + assertTrue(configurePoliciesSet. + contains(AutoCreatedQueueDeletionPolicy.class.getCanonicalName())); + + // disable RM scheduler monitor + conf.setBoolean( + YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS); + cs.reinitialize(conf, rm.getRMContext()); + assertTrue(smm.isRSMEmpty()); + rm.close(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueDeletionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueDeletionPolicy.java new file mode 100644 index 0000000000000..5359178d3aab0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueDeletionPolicy.java @@ -0,0 +1,184 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; +import org.junit.Assert; +import org.junit.Test; + +public class TestAutoCreatedQueueDeletionPolicy + extends TestCapacitySchedulerNewQueueAutoCreation { + private CapacityScheduler cs; + private AutoCreatedQueueDeletionPolicy policy; + + public void prepareForSchedule() throws Exception{ + super.startScheduler(); + + policy = getPolicy(); + cs = getCs(); + + policy.editSchedule(); + // There are no queues should be scheduled + Assert.assertEquals(policy.getMarkedForDeletion().size(), 0); + Assert.assertEquals(policy.getSentForDeletion().size(), 0); + + createQueue("root.e.e1"); + } + + @Test + public void testEditSchedule() throws Exception { + prepareForSchedule(); + // Make sure e not null + AbstractCSQueue e = (AbstractCSQueue) cs. + getQueue("root.e"); + Assert.assertNotNull(e); + Assert.assertTrue(e.isDynamicQueue()); + + // Make sure e1 not null + AbstractCSQueue e1 = (AbstractCSQueue)cs. + getQueue("root.e.e1"); + Assert.assertNotNull(e1); + Assert.assertTrue(e1.isDynamicQueue()); + // signal it because of without submit created + e1.setLastSubmittedTimestamp(Time.monotonicNow()); + + ApplicationAttemptId user0AppAttemptId = + submitApp(cs, USER0, USER0, "root.e"); + + // Wait user0 created successfully. + GenericTestUtils.waitFor(()-> cs.getQueue( + "root.e.user_0") != null, 100, + 2000); + // Make sure user0 not null + AbstractCSQueue user0 = (AbstractCSQueue) cs + .getQueue("root.e.user_0"); + Assert.assertNotNull(user0); + Assert.assertTrue(user0.isDynamicQueue()); + // Make app finished + AppAttemptRemovedSchedulerEvent event = + new AppAttemptRemovedSchedulerEvent(user0AppAttemptId, + RMAppAttemptState.FINISHED, false); + cs.handle(event); + AppRemovedSchedulerEvent rEvent = new AppRemovedSchedulerEvent( + user0AppAttemptId.getApplicationId(), RMAppState.FINISHED); + cs.handle(rEvent); + + // There are no apps in user0 + Assert.assertEquals(user0.getNumApplications(), 0); + + // Wait the time expired. + long l1 = user0.getLastSubmittedTimestamp(); + GenericTestUtils.waitFor(() -> { + long duration = (Time.monotonicNow() - l1)/1000; + return duration > getCs(). + getConfiguration().getAutoExpiredDeletionTime(); + }, 100, 2000); + + long l2 = e1.getLastSubmittedTimestamp(); + GenericTestUtils.waitFor(() -> { + long duration = (Time.monotonicNow() - l2)/1000; + return duration > getCs(). + getConfiguration().getAutoExpiredDeletionTime(); + }, 100, 2000); + + policy.editSchedule(); + // Make sure user_0 , e1 queue + // will be scheduled to mark for deletion + // because it is expired for deletion. 
+ Assert.assertEquals(policy.getMarkedForDeletion().size(), 2); + Assert.assertTrue(policy. + getMarkedForDeletion().contains("root.e.user_0")); + Assert.assertTrue(policy. + getMarkedForDeletion().contains("root.e.e1")); + // Make sure the send for deletion is empty for first mark. + Assert.assertEquals(policy.getSentForDeletion().size(), 0); + + // Make sure user_0 , e1 queue will be scheduled to send for deletion + policy.prepareForAutoDeletion(); + Assert.assertEquals(policy.getMarkedForDeletion().size(), 0); + Assert.assertEquals(policy.getSentForDeletion().size(), 2); + + // Make sure e1, user0 not null before trigger remove. + e1 = (AbstractCSQueue) cs.getQueue("root.e.e1"); + Assert.assertNotNull(e1); + user0 = (AbstractCSQueue)cs.getQueue("root.e.user_0"); + Assert.assertNotNull(user0); + + // Make sure e1, user0 will be null after trigger remove. + policy.triggerAutoDeletionForExpiredQueues(); + Assert.assertEquals(policy.getMarkedForDeletion().size(), 0); + Assert.assertEquals(policy.getSentForDeletion().size(), 0); + + // Wait e1, user0 auto deleted. + GenericTestUtils.waitFor(()-> cs.getQueue( + "root.e.e1") == null, + 100, 2000); + GenericTestUtils.waitFor(()-> cs.getQueue( + "root.e.user_0") == null, + 100, 2000); + e1 = (AbstractCSQueue) cs.getQueue("root.e.e1"); + Assert.assertNull(e1); + user0 = (AbstractCSQueue)cs.getQueue("root.e.user_0"); + Assert.assertNull(user0); + + // Make sure e is not null, before schedule. + e = (AbstractCSQueue) cs.getQueue("root.e"); + Assert.assertNotNull(e); + + // Expired for e + // Wait e marked for deletion. + long l3 = e.getLastSubmittedTimestamp(); + GenericTestUtils.waitFor(() -> { + long duration = (Time.monotonicNow() - l3)/1000; + return duration > getCs(). + getConfiguration().getAutoExpiredDeletionTime(); + }, 100, 2000); + policy.editSchedule(); + e = (AbstractCSQueue) cs.getQueue("root.e"); + Assert.assertNotNull(e); + Assert.assertEquals(policy.getMarkedForDeletion().size(), 1); + Assert.assertEquals(policy.getSentForDeletion().size(), 0); + Assert.assertTrue(policy.getMarkedForDeletion().contains("root.e")); + + // Make sure e queue will be scheduled to send for deletion + policy.prepareForAutoDeletion(); + Assert.assertEquals(policy.getMarkedForDeletion().size(), 0); + Assert.assertEquals(policy.getSentForDeletion().size(), 1); + + // Make sure e not null before trigger remove. + e = (AbstractCSQueue) cs.getQueue("root.e"); + Assert.assertNotNull(e); + + // Make sure e will be null after trigger remove. + policy.triggerAutoDeletionForExpiredQueues(); + // Wait e1 auto deleted. 
+ GenericTestUtils.waitFor(()-> cs.getQueue( + "root.e") == null, 100, 2000); + Assert.assertEquals(policy.getMarkedForDeletion().size(), 0); + Assert.assertEquals(policy.getSentForDeletion().size(), 0); + e = (AbstractCSQueue) cs.getQueue("root.e"); + Assert.assertNull(e); + } +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index 2f83f1f050457..45c411f81c129 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -26,12 +28,15 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; -import org.apache.hadoop.yarn.server.resourcemanager.placement.ApplicationPlacementContext; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Before; @@ -39,6 +44,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Set; +import java.util.HashSet; + public class TestCapacitySchedulerNewQueueAutoCreation extends TestCapacitySchedulerAutoCreatedQueueBase { private static final Logger LOG = LoggerFactory.getLogger( @@ -50,6 +58,16 @@ public class TestCapacitySchedulerNewQueueAutoCreation private CapacityScheduler cs; private CapacitySchedulerConfiguration csConf; private CapacitySchedulerAutoQueueHandler autoQueueHandler; + private AutoCreatedQueueDeletionPolicy policy = new + AutoCreatedQueueDeletionPolicy(); + + public CapacityScheduler getCs() { + return cs; + } + + public AutoCreatedQueueDeletionPolicy getPolicy() { + return policy; + } /* 
Create the following structure: @@ -75,9 +93,12 @@ public void setUp() throws Exception { csConf.setAutoQueueCreationV2Enabled("root", true); csConf.setAutoQueueCreationV2Enabled("root.a", true); csConf.setAutoQueueCreationV2Enabled("root.e", true); + csConf.setAutoQueueCreationV2Enabled(PARENT_QUEUE, true); + // Test for auto deletion when expired + csConf.setAutoExpiredDeletionTime(1); } - private void startScheduler() throws Exception { + protected void startScheduler() throws Exception { RMNodeLabelsManager mgr = new NullRMNodeLabelsManager(); mgr.init(csConf); mockRM = new MockRM(csConf) { @@ -87,6 +108,8 @@ protected RMNodeLabelsManager createNodeLabelManager() { }; cs = (CapacityScheduler) mockRM.getResourceScheduler(); cs.updatePlacementRules(); + // Policy for new auto created queue's auto deletion when expired + policy.init(cs.getConfiguration(), cs.getRMContext(), cs); mockRM.start(); cs.start(); autoQueueHandler = new CapacitySchedulerAutoQueueHandler( @@ -506,7 +529,7 @@ public void testAutoCreateQueueUserLimitDisabled() throws Exception { Assert.assertTrue(user0.isDynamicQueue()); Assert.assertTrue(user0 instanceof LeafQueue); - LeafQueue user0LeafQueue = (LeafQueue)user0; + LeafQueue user0LeafQueue = (LeafQueue) user0; // Assert user limit factor is -1 Assert.assertTrue(user0LeafQueue.getUserLimitFactor() == -1); @@ -517,10 +540,11 @@ public void testAutoCreateQueueUserLimitDisabled() throws Exception { // Assert AM Resource Assert.assertEquals(user0LeafQueue.getAMResourceLimit().getMemorySize(), - user0LeafQueue.getMaxAMResourcePerQueuePercent()*MAX_MEMORY*GB, 1e-6); + user0LeafQueue. + getMaxAMResourcePerQueuePercent() * MAX_MEMORY * GB, 1e-6); // Assert user limit (no limit) when limit factor is -1 - Assert.assertEquals(MAX_MEMORY*GB, + Assert.assertEquals(MAX_MEMORY * GB, user0LeafQueue.getEffectiveMaxCapacityDown("", user0LeafQueue.getMinimumAllocation()).getMemorySize(), 1e-6); } @@ -585,7 +609,274 @@ public void testAutoQueueCreationDepthLimitFromStaticParent() } - private LeafQueue createQueue(String queuePath) throws YarnException { + @Test + public void testCapacitySchedulerAutoQueueDeletion() throws Exception { + startScheduler(); + csConf.setBoolean( + YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true); + csConf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + AutoCreatedQueueDeletionPolicy.class.getCanonicalName()); + csConf.setAutoExpiredDeletionTime(1); + cs.reinitialize(csConf, mockRM.getRMContext()); + + Set policies = new HashSet<>(); + policies.add( + AutoCreatedQueueDeletionPolicy.class.getCanonicalName()); + + Assert.assertTrue( + "No AutoCreatedQueueDeletionPolicy " + + "is present in running monitors", + cs.getSchedulingMonitorManager(). + isSameConfiguredPolicies(policies)); + + ApplicationAttemptId a2App = submitApp(cs, USER0, + "a2-auto", "root.a.a1-auto"); + + // Wait a2 created successfully. + GenericTestUtils.waitFor(()-> cs.getQueue( + "root.a.a1-auto.a2-auto") != null, + 100, 2000); + + AbstractCSQueue a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNotNull("a1 is not present", a1); + AbstractCSQueue a2 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + Assert.assertNotNull("a2 is not present", a2); + Assert.assertTrue("a2 is not a dynamic queue", + a2.isDynamicQueue()); + + // Now there are still 1 app in a2 queue. + Assert.assertEquals(1, a2.getNumApplications()); + + // Wait the time expired. 
+ long l1 = a2.getLastSubmittedTimestamp(); + GenericTestUtils.waitFor(() -> { + long duration = (Time.monotonicNow() - l1)/1000; + return duration > csConf.getAutoExpiredDeletionTime(); + }, 100, 2000); + + // Make sure the queue will not be deleted + // when expired with remaining apps. + a2 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + Assert.assertNotNull("a2 is not present", a2); + + // Make app finished. + AppAttemptRemovedSchedulerEvent event = + new AppAttemptRemovedSchedulerEvent(a2App, + RMAppAttemptState.FINISHED, false); + cs.handle(event); + AppRemovedSchedulerEvent rEvent = new AppRemovedSchedulerEvent( + a2App.getApplicationId(), RMAppState.FINISHED); + cs.handle(rEvent); + + // Now there are no apps in a2 queue. + Assert.assertEquals(0, a2.getNumApplications()); + + // Wait the a2 deleted. + GenericTestUtils.waitFor(() -> { + AbstractCSQueue a2Tmp = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + return a2Tmp == null; + }, 100, 3000); + + a2 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + Assert.assertNull("a2 is not deleted", a2); + + // The parent will not be deleted with child queues + a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNotNull("a1 is not present", a1); + + // Now the parent queue without child + // will be deleted for expired. + // Wait a1 deleted. + GenericTestUtils.waitFor(() -> { + AbstractCSQueue a1Tmp = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + return a1Tmp == null; + }, 100, 3000); + a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNull("a1 is not deleted", a1); + } + + @Test + public void testCapacitySchedulerAutoQueueDeletionDisabled() + throws Exception { + startScheduler(); + // Test for disabled auto deletion + csConf.setAutoExpiredDeletionEnabled( + "root.a.a1-auto.a2-auto", false); + csConf.setBoolean( + YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true); + csConf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + AutoCreatedQueueDeletionPolicy.class.getCanonicalName()); + csConf.setAutoExpiredDeletionTime(1); + cs.reinitialize(csConf, mockRM.getRMContext()); + + Set policies = new HashSet<>(); + policies.add( + AutoCreatedQueueDeletionPolicy.class.getCanonicalName()); + + Assert.assertTrue( + "No AutoCreatedQueueDeletionPolicy " + + "is present in running monitors", + cs.getSchedulingMonitorManager(). + isSameConfiguredPolicies(policies)); + + ApplicationAttemptId a2App = submitApp(cs, USER0, + "a2-auto", "root.a.a1-auto"); + + // Wait a2 created successfully. + GenericTestUtils.waitFor(()-> cs.getQueue( + "root.a.a1-auto.a2-auto") != null, + 100, 2000); + + AbstractCSQueue a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNotNull("a1 is not present", a1); + AbstractCSQueue a2 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + Assert.assertNotNull("a2 is not present", a2); + Assert.assertTrue("a2 is not a dynamic queue", + a2.isDynamicQueue()); + + // Make app finished. + AppAttemptRemovedSchedulerEvent event = + new AppAttemptRemovedSchedulerEvent(a2App, + RMAppAttemptState.FINISHED, false); + cs.handle(event); + AppRemovedSchedulerEvent rEvent = new AppRemovedSchedulerEvent( + a2App.getApplicationId(), RMAppState.FINISHED); + cs.handle(rEvent); + + // Now there are no apps in a2 queue. + Assert.assertEquals(0, a2.getNumApplications()); + + // Wait the time expired. 
+ long l1 = a2.getLastSubmittedTimestamp(); + GenericTestUtils.waitFor(() -> { + long duration = (Time.monotonicNow() - l1)/1000; + return duration > csConf.getAutoExpiredDeletionTime(); + }, 100, 2000); + + // The auto deletion is no enabled for a2-auto + a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNotNull("a1 is not present", a1); + a2 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + Assert.assertNotNull("a2 is not present", a2); + Assert.assertTrue("a2 is not a dynamic queue", + a2.isDynamicQueue()); + + // Enabled now + // The auto deletion will work. + csConf.setAutoExpiredDeletionEnabled( + "root.a.a1-auto.a2-auto", true); + cs.reinitialize(csConf, mockRM.getRMContext()); + + // Wait the a2 deleted. + GenericTestUtils.waitFor(() -> { + AbstractCSQueue a2Tmp = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto.a2-auto"); + return a2Tmp == null; + }, 100, 3000); + + a2 = (AbstractCSQueue) cs. + getQueue("root.a.a1-auto.a2-auto"); + Assert.assertNull("a2 is not deleted", a2); + // The parent will not be deleted with child queues + a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNotNull("a1 is not present", a1); + + // Now the parent queue without child + // will be deleted for expired. + // Wait a1 deleted. + GenericTestUtils.waitFor(() -> { + AbstractCSQueue a1Tmp = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + return a1Tmp == null; + }, 100, 3000); + a1 = (AbstractCSQueue) cs.getQueue( + "root.a.a1-auto"); + Assert.assertNull("a1 is not deleted", a1); + } + + @Test + public void testAutoCreateQueueAfterRemoval() throws Exception { + // queue's weights are 1 + // root + // - a (w=1) + // - b (w=1) + // - c-auto (w=1) + // - d-auto (w=1) + // - e-auto (w=1) + // - e1-auto (w=1) + startScheduler(); + + createBasicQueueStructureAndValidate(); + + // Under e, there's only one queue, so e1/e have same capacity + CSQueue e1 = cs.getQueue("root.e-auto.e1-auto"); + Assert.assertEquals(1 / 5f, e1.getAbsoluteCapacity(), 1e-6); + Assert.assertEquals(1f, e1.getQueueCapacities().getWeight(), 1e-6); + Assert.assertEquals(240 * GB, + e1.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + + // Check after removal e1. + cs.removeQueue(e1); + CSQueue e = cs.getQueue("root.e-auto"); + Assert.assertEquals(1 / 5f, e.getAbsoluteCapacity(), 1e-6); + Assert.assertEquals(1f, e.getQueueCapacities().getWeight(), 1e-6); + Assert.assertEquals(240 * GB, + e.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + + // Check after removal e. + cs.removeQueue(e); + CSQueue d = cs.getQueue("root.d-auto"); + Assert.assertEquals(1 / 4f, d.getAbsoluteCapacity(), 1e-6); + Assert.assertEquals(1f, d.getQueueCapacities().getWeight(), 1e-6); + Assert.assertEquals(300 * GB, + d.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + + // Check after removal d. + cs.removeQueue(d); + CSQueue c = cs.getQueue("root.c-auto"); + Assert.assertEquals(1 / 3f, c.getAbsoluteCapacity(), 1e-6); + Assert.assertEquals(1f, c.getQueueCapacities().getWeight(), 1e-6); + Assert.assertEquals(400 * GB, + c.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + + // Check after removal c. 
+ cs.removeQueue(c); + CSQueue b = cs.getQueue("root.b"); + Assert.assertEquals(1 / 2f, b.getAbsoluteCapacity(), 1e-6); + Assert.assertEquals(1f, b.getQueueCapacities().getWeight(), 1e-6); + Assert.assertEquals(600 * GB, + b.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + + // Check can't remove static queue b. + try { + cs.removeQueue(b); + Assert.fail("Can't remove static queue b!"); + } catch (Exception ex) { + Assert.assertTrue(ex + instanceof SchedulerDynamicEditException); + } + // Check a. + CSQueue a = cs.getQueue("root.a"); + Assert.assertEquals(1 / 2f, a.getAbsoluteCapacity(), 1e-6); + Assert.assertEquals(1f, a.getQueueCapacities().getWeight(), 1e-6); + Assert.assertEquals(600 * GB, + b.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + } + + protected LeafQueue createQueue(String queuePath) throws YarnException { return autoQueueHandler.autoCreateQueue( CSQueueUtils.extractQueuePath(queuePath)); } From 39f7815197a4292019fd0e7800b5526a8cbdf575 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 5 Mar 2021 13:18:06 +0100 Subject: [PATCH 057/666] YARN-10639. Queueinfo related capacity, should adjusted to weight mode. Contributed by Qi Zhu. --- .../hadoop/yarn/api/records/QueueInfo.java | 23 +++++++--- .../src/main/proto/yarn_protos.proto | 1 + .../hadoop/yarn/client/cli/QueueCLI.java | 2 + .../yarn/client/ProtocolHATestBase.java | 5 ++- .../hadoop/yarn/client/cli/TestYarnCLI.java | 8 +++- .../api/records/impl/pb/QueueInfoPBImpl.java | 12 +++++ .../hadoop/yarn/api/TestPBImplRecords.java | 2 +- .../scheduler/capacity/AbstractCSQueue.java | 1 + .../TestConfigurationMutationACLPolicies.java | 2 +- .../TestSchedulerApplicationAttempt.java | 2 +- .../TestCapacitySchedulerWeightMode.java | 45 ++++++++++++++++--- .../scheduler/capacity/TestLeafQueue.java | 2 +- 12 files changed, 87 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java index 57ea9bfe44c9a..803adad002bb0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java @@ -59,7 +59,7 @@ public static QueueInfo newInstance(String queueName, float capacity, List childQueues, List applications, QueueState queueState, Set accessibleNodeLabels, String defaultNodeLabelExpression, QueueStatistics queueStatistics, - boolean preemptionDisabled) { + boolean preemptionDisabled, float weight) { QueueInfo queueInfo = Records.newRecord(QueueInfo.class); queueInfo.setQueueName(queueName); queueInfo.setCapacity(capacity); @@ -72,6 +72,7 @@ public static QueueInfo newInstance(String queueName, float capacity, queueInfo.setDefaultNodeLabelExpression(defaultNodeLabelExpression); queueInfo.setQueueStatistics(queueStatistics); queueInfo.setPreemptionDisabled(preemptionDisabled); + queueInfo.setWeight(weight); return queueInfo; } @@ -82,14 +83,14 @@ public static QueueInfo newInstance(String queueName, float capacity, List childQueues, List applications, QueueState queueState, Set accessibleNodeLabels, String defaultNodeLabelExpression, QueueStatistics queueStatistics, - boolean preemptionDisabled, + boolean preemptionDisabled, float weight, Map queueConfigurations) { QueueInfo queueInfo = 
QueueInfo.newInstance(queueName, capacity, maximumCapacity, currentCapacity, childQueues, applications, queueState, accessibleNodeLabels, defaultNodeLabelExpression, queueStatistics, - preemptionDisabled); + preemptionDisabled, weight); queueInfo.setQueueConfigurations(queueConfigurations); return queueInfo; } @@ -101,7 +102,7 @@ public static QueueInfo newInstance(String queueName, float capacity, List childQueues, List applications, QueueState queueState, Set accessibleNodeLabels, String defaultNodeLabelExpression, QueueStatistics queueStatistics, - boolean preemptionDisabled, + boolean preemptionDisabled, float weight, Map queueConfigurations, boolean intraQueuePreemptionDisabled) { QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity, @@ -109,7 +110,7 @@ public static QueueInfo newInstance(String queueName, float capacity, childQueues, applications, queueState, accessibleNodeLabels, defaultNodeLabelExpression, queueStatistics, - preemptionDisabled, queueConfigurations); + preemptionDisabled, weight, queueConfigurations); queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled); return queueInfo; } @@ -137,6 +138,18 @@ public static QueueInfo newInstance(String queueName, float capacity, @Private @Unstable public abstract void setCapacity(float capacity); + + /** + * Get the configured weight of the queue. + * @return configured weight of the queue + */ + @Public + @Stable + public abstract float getWeight(); + + @Private + @Unstable + public abstract void setWeight(float weight); /** * Get the maximum capacity of the queue. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index d7ca2a563eabb..467f26aa11191 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -609,6 +609,7 @@ message QueueInfoProto { optional bool preemptionDisabled = 11; repeated QueueConfigurationsMapProto queueConfigurationsMap = 12; optional bool intraQueuePreemptionDisabled = 13; + optional float weight = 14; } message QueueConfigurationsProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java index 550b929d37ede..98f714fed81c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java @@ -135,6 +135,8 @@ private void printQueueInfo(PrintWriter writer, QueueInfo queueInfo) { writer.println(df.format(queueInfo.getCurrentCapacity() * 100) + "%"); writer.print("\tMaximum Capacity : "); writer.println(df.format(queueInfo.getMaximumCapacity() * 100) + "%"); + writer.print("\tWeight : "); + writer.println(df.format(queueInfo.getWeight())); writer.print("\tDefault Node Label expression : "); String nodeLabelExpression = queueInfo.getDefaultNodeLabelExpression(); nodeLabelExpression = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index c923446a0089f..8c89cf8832555 
100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -669,8 +669,9 @@ public List createFakeNodeReports() { public QueueInfo createFakeQueueInfo() { return QueueInfo.newInstance("root", 100f, 100f, 50f, null, - createFakeAppReports(), QueueState.RUNNING, null, null, null, false, - null, false); + createFakeAppReports(), QueueState.RUNNING, null, + null, null, false, -1.0f, + null, false); } public List createFakeQueueUserACLInfoList() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index b5606018a8b29..90e2a75c2e102 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -1719,7 +1719,8 @@ public void testGetQueueInfo() throws Exception { nodeLabels.add("GPU"); nodeLabels.add("JDK_7"); QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f, - null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null, + null, null, QueueState.RUNNING, nodeLabels, + "GPU", null, false, -1.0f, null, false); when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo); int result = cli.run(new String[] { "-status", "queueA" }); @@ -1733,6 +1734,7 @@ public void testGetQueueInfo() throws Exception { pw.println("\tCapacity : " + "40.00%"); pw.println("\tCurrent Capacity : " + "50.00%"); pw.println("\tMaximum Capacity : " + "80.00%"); + pw.println("\tWeight : " + "-1.00"); pw.println("\tDefault Node Label expression : " + "GPU"); pw.println("\tAccessible Node Labels : " + "JDK_7,GPU"); pw.println("\tPreemption : " + "enabled"); @@ -1887,7 +1889,8 @@ public void testGetQueueInfoPreemptionDisabled() throws Exception { public void testGetQueueInfoWithEmptyNodeLabel() throws Exception { QueueCLI cli = createAndGetQueueCLI(); QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f, - null, null, QueueState.RUNNING, null, null, null, true, null, true); + null, null, QueueState.RUNNING, null, null, null, + true, -1.0f, null, true); when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo); int result = cli.run(new String[] { "-status", "queueA" }); assertEquals(0, result); @@ -1900,6 +1903,7 @@ public void testGetQueueInfoWithEmptyNodeLabel() throws Exception { pw.println("\tCapacity : " + "40.00%"); pw.println("\tCurrent Capacity : " + "50.00%"); pw.println("\tMaximum Capacity : " + "80.00%"); + pw.println("\tWeight : " + "-1.00"); pw.println("\tDefault Node Label expression : " + NodeLabel.DEFAULT_NODE_LABEL_PARTITION); pw.println("\tAccessible Node Labels : "); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java index db9ece4dce1b8..cb3757bd992dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java @@ -124,6 +124,18 @@ public void setCapacity(float capacity) { builder.setCapacity(capacity); } + @Override + public float getWeight() { + QueueInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasWeight()) ? p.getWeight() : -1; + } + + @Override + public void setWeight(float weight) { + maybeInitBuilder(); + builder.setWeight(weight); + } + @Override public void setChildQueues(List childQueues) { if (childQueues == null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index 980ec0da9f1c7..7792cad2a848a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -434,7 +434,7 @@ public static void setup() throws Exception { // it is recursive(has sub queues) typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f, 1.0f, 0.1f, null, null, QueueState.RUNNING, ImmutableSet.of("x", "y"), - "x && y", null, false, null, false)); + "x && y", null, false, -1.0f, null, false)); generateByNewInstance(QueueStatistics.class); generateByNewInstance(QueueUserACLInfo.class); generateByNewInstance(YarnClusterMetrics.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index e5380fa952889..7aa8e012df6f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -727,6 +727,7 @@ protected QueueInfo getQueueInfo() { queueInfo.setIntraQueuePreemptionDisabled( getIntraQueuePreemptionDisabled()); queueInfo.setQueueConfigurations(getQueueConfigurations()); + queueInfo.setWeight(queueCapacities.getWeight()); return queueInfo; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java index 8741432cc0afb..90e12ac416bd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java @@ -67,7 +67,7 @@ public void setUp() 
throws IOException { private void mockQueue(String queueName, MutableConfScheduler scheduler) throws IOException { QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null, - null, null, null, null, false, null, false); + null, null, null, null, false, -1.0f, null, false); when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean())) .thenReturn(queueInfo); Queue queue = mock(Queue.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index b1080f7d5fea4..93d52bdb79050 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -217,7 +217,7 @@ private Queue createQueue(String name, Queue parent) { private Queue createQueue(String name, Queue parent, float capacity) { QueueMetrics metrics = QueueMetrics.forQueue(name, parent, false, conf); QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null, - null, QueueState.RUNNING, null, "", null, false, null, false); + null, QueueState.RUNNING, null, "", null, false, -1.0f, null, false); ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics); Queue queue = mock(Queue.class); when(queue.getMetrics()).thenReturn(metrics); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java index 1742ae45f35ed..77c8e053e3bc0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java @@ -257,7 +257,7 @@ public static Configuration getCSConfWithLabelsParentUsePctChildUseWeight( */ @Test(timeout = 300000) public void testContainerAllocateWithComplexLabelsWeightOnly() throws Exception { - internalTestContainerAlloationWithNodeLabel( + internalTestContainerAllocationWithNodeLabel( getCSConfWithQueueLabelsWeightOnly(conf)); } @@ -270,7 +270,7 @@ public void testContainerAllocateWithComplexLabelsWeightOnly() throws Exception */ @Test(timeout = 300000) public void testContainerAllocateWithComplexLabelsWeightAndPercentMixed1() throws Exception { - internalTestContainerAlloationWithNodeLabel( + internalTestContainerAllocationWithNodeLabel( getCSConfWithLabelsParentUseWeightChildUsePct(conf)); } @@ -283,7 +283,7 @@ public void testContainerAllocateWithComplexLabelsWeightAndPercentMixed1() throw */ 
@Test(timeout = 300000) public void testContainerAllocateWithComplexLabelsWeightAndPercentMixed2() throws Exception { - internalTestContainerAlloationWithNodeLabel( + internalTestContainerAllocationWithNodeLabel( getCSConfWithLabelsParentUsePctChildUseWeight(conf)); } @@ -338,8 +338,43 @@ public void testGetCapacityOrWeightStringParentPctLeafWeights() } } - private void internalTestContainerAlloationWithNodeLabel(Configuration csConf) - throws Exception { + @Test + public void testQueueInfoWeight() throws Exception { + MockRM rm = new MockRM(conf); + rm.init(conf); + rm.start(); + + CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration( + conf); + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"a", "b", "default"}); + csConf.setNonLabeledQueueWeight("root.a", 1); + csConf.setNonLabeledQueueWeight("root.b", 2); + csConf.setNonLabeledQueueWeight("root.default", 3); + + // Check queue info capacity + CapacityScheduler cs = + (CapacityScheduler)rm.getRMContext().getScheduler(); + cs.reinitialize(csConf, rm.getRMContext()); + + LeafQueue a = (LeafQueue) + cs.getQueue("root.a"); + Assert.assertNotNull(a); + Assert.assertEquals(a.getQueueCapacities().getWeight(), + a.getQueueInfo(false, + false).getWeight(), 1e-6); + + LeafQueue b = (LeafQueue) + cs.getQueue("root.b"); + Assert.assertNotNull(b); + Assert.assertEquals(b.getQueueCapacities().getWeight(), + b.getQueueInfo(false, + false).getWeight(), 1e-6); + rm.close(); + } + + private void internalTestContainerAllocationWithNodeLabel( + Configuration csConf) throws Exception { /* * Queue structure: * root (*) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 889da07adda37..1a45908ee9170 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -4905,7 +4905,7 @@ private AbstractCSQueue createQueue(String name, Queue parent, float capacity, float absCap, Resource res) { CSQueueMetrics metrics = CSQueueMetrics.forQueue(name, parent, false, cs.getConf()); QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null, - null, QueueState.RUNNING, null, "", null, false, null, false); + null, QueueState.RUNNING, null, "", null, false, -1.0f, null, false); ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics); AbstractCSQueue queue = mock(AbstractCSQueue.class); when(queue.getMetrics()).thenReturn(metrics); From 5415d2eb948c95a2d0529d18519439bef72d53c1 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 5 Mar 2021 13:50:45 +0100 Subject: [PATCH 058/666] YARN-10642. Race condition: AsyncDispatcher can get stuck by the changes introduced in YARN-8995. Contributed by zhengchenyu. 
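For reference, a minimal self-contained sketch of the counting approach this change switches to: walk the queue's weakly consistent iterator and tally per event type in a plain HashMap, instead of running a Stream collector over the live event queue. The DummyEventType enum and standalone class below are illustrative stand-ins for this sketch only, not the actual YARN dispatcher classes.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class EventQueueStatsSketch {
  // Hypothetical event type, used only for this sketch.
  enum DummyEventType { NODE_UPDATE, APP_ADDED, APP_REMOVED }

  // Count queued events per type with the queue's weakly consistent
  // iterator; a simple tally that needs no extra locking while other
  // threads keep offering and polling events.
  static Map<DummyEventType, Long> countByType(
      BlockingQueue<DummyEventType> queue) {
    Map<DummyEventType, Long> counters = new HashMap<>();
    Iterator<DummyEventType> it = queue.iterator();
    while (it.hasNext()) {
      DummyEventType type = it.next();
      counters.put(type, counters.getOrDefault(type, 0L) + 1);
    }
    return counters;
  }

  public static void main(String[] args) {
    BlockingQueue<DummyEventType> queue = new LinkedBlockingQueue<>();
    queue.add(DummyEventType.NODE_UPDATE);
    queue.add(DummyEventType.NODE_UPDATE);
    queue.add(DummyEventType.APP_ADDED);
    countByType(queue).forEach((type, num) ->
        System.out.println("Event type: " + type + ", count: " + num));
  }
}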
--- .../hadoop/yarn/event/AsyncDispatcher.java | 19 +++--- .../yarn/event/TestAsyncDispatcher.java | 58 ++++++++++++++++++- 2 files changed, 68 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index 79ad464200d81..f9deab06ff27f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -20,11 +20,11 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -268,11 +268,16 @@ public EventHandler getEventHandler() { } class GenericEventHandler implements EventHandler { - private void printEventQueueDetails(BlockingQueue queue) { - Map counterMap = eventQueue.stream(). - collect(Collectors. - groupingBy(e -> e.getType(), Collectors.counting()) - ); + private void printEventQueueDetails() { + Iterator iterator = eventQueue.iterator(); + Map counterMap = new HashMap<>(); + while (iterator.hasNext()) { + Enum eventType = iterator.next().getType(); + if (!counterMap.containsKey(eventType)) { + counterMap.put(eventType, 0L); + } + counterMap.put(eventType, counterMap.get(eventType) + 1); + } for (Map.Entry entry : counterMap.entrySet()) { long num = entry.getValue(); LOG.info("Event type: " + entry.getKey() @@ -295,7 +300,7 @@ public void handle(Event event) { if (qSize != 0 && qSize % detailsInterval == 0 && lastEventDetailsQueueSizeLogged != qSize) { lastEventDetailsQueueSizeLogged = qSize; - printEventQueueDetails(eventQueue); + printEventQueueDetails(); printTrigger = true; } int remCapacity = eventQueue.remainingCapacity(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java index 762e2280ca33b..55ddd12fce98e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java @@ -97,12 +97,23 @@ private enum DummyType { } private static class TestHandler implements EventHandler { + + private long sleepTime = 1500; + + TestHandler() { + } + + TestHandler(long sleepTime) { + this.sleepTime = sleepTime; + } + @Override public void handle(Event event) { try { // As long as 10000 events queued - Thread.sleep(1500); - } catch (InterruptedException e) {} + Thread.sleep(this.sleepTime); + } catch (InterruptedException e) { + } } } @@ -170,11 +181,54 @@ public void testPrintDispatcherEventDetails() throws Exception { //Make sure more than one event to take verify(log, atLeastOnce()). info("Latest dispatch event type: TestEventType"); + } finally { + //... 
restore logger object + logger.set(null, oldLog); dispatcher.stop(); + } + } + + //Test print dispatcher details when the blocking queue is heavy + @Test(timeout = 60000) + public void testPrintDispatcherEventDetailsAvoidDeadLoop() throws Exception { + for (int i = 0; i < 5; i++) { + testPrintDispatcherEventDetailsAvoidDeadLoopInternal(); + } + } + + public void testPrintDispatcherEventDetailsAvoidDeadLoopInternal() + throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.setInt(YarnConfiguration. + YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD, 10); + Logger log = mock(Logger.class); + AsyncDispatcher dispatcher = new AsyncDispatcher(); + dispatcher.init(conf); + + Field logger = AsyncDispatcher.class.getDeclaredField("LOG"); + logger.setAccessible(true); + Field modifiers = Field.class.getDeclaredField("modifiers"); + modifiers.setAccessible(true); + modifiers.setInt(logger, logger.getModifiers() & ~Modifier.FINAL); + Object oldLog = logger.get(null); + + try { + logger.set(null, log); + dispatcher.register(TestEnum.class, new TestHandler(0)); + dispatcher.start(); + + for (int i = 0; i < 10000; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType); + dispatcher.getEventHandler().handle(event); + } + Thread.sleep(3000); } finally { //... restore logger object logger.set(null, oldLog); + dispatcher.stop(); } } + } From dc2669b44d9204312475cec55675625eb0689e09 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 5 Mar 2021 22:56:51 +0900 Subject: [PATCH 059/666] HADOOP-17563. Update Bouncy Castle to 1.68. (#2740) --- LICENSE-binary | 4 ++-- hadoop-project/pom.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LICENSE-binary b/LICENSE-binary index 4a4b953913c8f..9053b60e13786 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -468,8 +468,8 @@ com.microsoft.azure:azure-cosmosdb-gateway:2.4.5 com.microsoft.azure:azure-data-lake-store-sdk:2.3.3 com.microsoft.azure:azure-keyvault-core:1.0.0 com.microsoft.sqlserver:mssql-jdbc:6.2.1.jre7 -org.bouncycastle:bcpkix-jdk15on:1.60 -org.bouncycastle:bcprov-jdk15on:1.60 +org.bouncycastle:bcpkix-jdk15on:1.68 +org.bouncycastle:bcprov-jdk15on:1.68 org.checkerframework:checker-qual:2.5.2 org.codehaus.mojo:animal-sniffer-annotations:1.17 org.jruby.jcodings:jcodings:1.0.13 diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c26ceacc67308..7c74615dbce4c 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -107,7 +107,7 @@ 27.0-jre 4.2.3 - 1.60 + 1.68 2.0.0-M21 From 13151cef72bdfc8871bf7bc87ef14b0939c5219a Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 5 Mar 2021 15:56:56 +0100 Subject: [PATCH 060/666] YARN-10640. Adjust the queue Configured capacity to Configured weight number for weight mode in UI. Contributed by Qi Zhu. 
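In weight mode a queue has no meaningful configured capacity percentage, so the page switches the rendered label/value pair on the weight field. Below is a rough standalone sketch of that selection logic; it assumes -1 is the sentinel for "no weight configured" (consistent with the QueueInfo changes earlier in this series), and the class, method, and parameter names are illustrative only, not the actual webapp code.

public class ConfiguredCapacityLabelSketch {
  // Pick the label and value to render for the "configured" row:
  // show the weight when one is set, otherwise the configured min resource.
  static String render(float weight, String configuredMinResource) {
    boolean weightMode = weight != -1;
    String label = weightMode ? "Configured Weight:" : "Configured Capacity:";
    String value = weightMode
        ? String.valueOf(weight)
        : (configuredMinResource == null ? "none" : configuredMinResource);
    return label + " " + value;
  }

  public static void main(String[] args) {
    System.out.println(render(2.0f, null));                 // weight mode
    System.out.println(render(-1.0f, "memory: 4096 MB, vCores: 4")); // pct mode
  }
}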
--- .../resourcemanager/webapp/CapacitySchedulerPage.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java index 47c888d189dd9..8212a728e4f8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java @@ -159,8 +159,12 @@ private void renderQueueCapacityInfo(ResponseInfo ri, String label) { __("Used Capacity:", appendPercent(resourceUsages.getUsed(), capacities.getUsedCapacity() / 100)) - .__("Configured Capacity:", - capacities.getConfiguredMinResource() == null ? + .__(capacities.getWeight() != -1 ? + "Configured Weight:" : + "Configured Capacity:", + capacities.getWeight() != -1 ? + capacities.getWeight() : + capacities.getConfiguredMinResource() == null ? Resources.none().toString() : capacities.getConfiguredMinResource().toString()) .__("Configured Max Capacity:", From 3a55db0affdfd3b10072d381d65c9a6c2f4ff8a4 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Fri, 5 Mar 2021 19:46:40 +0000 Subject: [PATCH 061/666] YARN-10664. Allow parameter expansion in NM_ADMIN_USER_ENV. Contributed by Jim Brennan. --- .../launcher/ContainerLaunch.java | 66 ++++++++----- .../launcher/TestContainerLaunch.java | 97 ++++++++++++++++++- 2 files changed, 136 insertions(+), 27 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index 4ea790940a4b0..e864c14ad7092 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -179,15 +179,36 @@ public static String expandEnvironment(String var, return var; } - private Map expandAllEnvironmentVars( - ContainerLaunchContext launchContext, Path containerLogDir) { - Map environment = launchContext.getEnvironment(); + private void expandAllEnvironmentVars( + Map environment, Path containerLogDir) { for (Entry entry : environment.entrySet()) { String value = entry.getValue(); value = expandEnvironment(value, containerLogDir); entry.setValue(value); } - return environment; + } + + private void addKeystoreVars(Map environment, + Path containerWorkDir) { + environment.put(ApplicationConstants.KEYSTORE_FILE_LOCATION_ENV_NAME, + new Path(containerWorkDir, + ContainerLaunch.KEYSTORE_FILE).toUri().getPath()); + environment.put(ApplicationConstants.KEYSTORE_PASSWORD_ENV_NAME, + new 
String(container.getCredentials().getSecretKey( + AMSecretKeys.YARN_APPLICATION_AM_KEYSTORE_PASSWORD), + StandardCharsets.UTF_8)); + } + + private void addTruststoreVars(Map environment, + Path containerWorkDir) { + environment.put( + ApplicationConstants.TRUSTSTORE_FILE_LOCATION_ENV_NAME, + new Path(containerWorkDir, + ContainerLaunch.TRUSTSTORE_FILE).toUri().getPath()); + environment.put(ApplicationConstants.TRUSTSTORE_PASSWORD_ENV_NAME, + new String(container.getCredentials().getSecretKey( + AMSecretKeys.YARN_APPLICATION_AM_TRUSTSTORE_PASSWORD), + StandardCharsets.UTF_8)); } @Override @@ -222,8 +243,10 @@ public Integer call() { } launchContext.setCommands(newCmds); - Map environment = expandAllEnvironmentVars( - launchContext, containerLogDir); + // The actual expansion of environment variables happens after calling + // sanitizeEnv. This allows variables specified in NM_ADMIN_USER_ENV + // to reference user or container-defined variables. + Map environment = launchContext.getEnvironment(); // /////////////////////////// End of variable expansion // Use this to track variables that are added to the environment by nm. @@ -289,13 +312,6 @@ public Integer call() { lfs.create(nmPrivateKeystorePath, EnumSet.of(CREATE, OVERWRITE))) { keystoreOutStream.write(keystore); - environment.put(ApplicationConstants.KEYSTORE_FILE_LOCATION_ENV_NAME, - new Path(containerWorkDir, - ContainerLaunch.KEYSTORE_FILE).toUri().getPath()); - environment.put(ApplicationConstants.KEYSTORE_PASSWORD_ENV_NAME, - new String(container.getCredentials().getSecretKey( - AMSecretKeys.YARN_APPLICATION_AM_KEYSTORE_PASSWORD), - StandardCharsets.UTF_8)); } } else { nmPrivateKeystorePath = null; @@ -307,14 +323,6 @@ public Integer call() { lfs.create(nmPrivateTruststorePath, EnumSet.of(CREATE, OVERWRITE))) { truststoreOutStream.write(truststore); - environment.put( - ApplicationConstants.TRUSTSTORE_FILE_LOCATION_ENV_NAME, - new Path(containerWorkDir, - ContainerLaunch.TRUSTSTORE_FILE).toUri().getPath()); - environment.put(ApplicationConstants.TRUSTSTORE_PASSWORD_ENV_NAME, - new String(container.getCredentials().getSecretKey( - AMSecretKeys.YARN_APPLICATION_AM_TRUSTSTORE_PASSWORD), - StandardCharsets.UTF_8)); } } else { nmPrivateTruststorePath = null; @@ -335,6 +343,16 @@ public Integer call() { containerLogDirs, localResources, nmPrivateClasspathJarDir, nmEnvVars); + expandAllEnvironmentVars(environment, containerLogDir); + + // Add these if needed after expanding so we don't expand key values. + if (keystore != null) { + addKeystoreVars(environment, containerWorkDir); + } + if (truststore != null) { + addTruststoreVars(environment, containerWorkDir); + } + prepareContainer(localResources, containerLocalDirs); // Write out the environment @@ -1628,13 +1646,13 @@ public void sanitizeEnv(Map environment, Path pwd, } // variables here will be forced in, even if the container has - // specified them. + // specified them. Note: we do not track these in nmVars, to + // allow them to be ordered properly if they reference variables + // defined by the user. 
String defEnvStr = conf.get(YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV); Apps.setEnvFromInputProperty(environment, YarnConfiguration.NM_ADMIN_USER_ENV, defEnvStr, conf, File.pathSeparator); - nmVars.addAll(Apps.getEnvVarsFromInputProperty( - YarnConfiguration.NM_ADMIN_USER_ENV, defEnvStr, conf)); if (!Shell.WINDOWS) { // maybe force path components diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index f258572c01f1f..a9bcef77c3963 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -673,7 +673,7 @@ public void testPrependDistcache() throws Exception { Container container = mock(Container.class); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(containerLaunchContext); - when(container.getLocalizedResources()).thenReturn(null); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); Dispatcher dispatcher = mock(Dispatcher.class); EventHandler eventHandler = new EventHandler() { public void handle(Event event) { @@ -814,8 +814,6 @@ public void handle(Event event) { Assert.assertTrue(userSetEnv.containsKey(testKey1)); Assert.assertTrue(userSetEnv.containsKey(testKey2)); Assert.assertTrue(userSetEnv.containsKey(testKey3)); - Assert.assertTrue(nmEnvTrack.contains("MALLOC_ARENA_MAX")); - Assert.assertTrue(nmEnvTrack.contains("MOUNT_LIST")); Assert.assertEquals(userMallocArenaMaxVal + File.pathSeparator + mallocArenaMaxVal, userSetEnv.get("MALLOC_ARENA_MAX")); Assert.assertEquals(testVal1, userSetEnv.get(testKey1)); @@ -1857,6 +1855,7 @@ public void testContainerLaunchOnConfigurationError() throws Exception { when(id.toString()).thenReturn("1"); when(container.getContainerId()).thenReturn(id); when(container.getUser()).thenReturn("user"); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); ContainerLaunchContext clc = mock(ContainerLaunchContext.class); when(clc.getCommands()).thenReturn(Lists.newArrayList()); when(container.getLaunchContext()).thenReturn(clc); @@ -2453,6 +2452,7 @@ public void testDistributedCacheDirs() throws Exception { .newContainerId(ApplicationAttemptId.newInstance(appId, 1), 1); when(container.getContainerId()).thenReturn(containerId); when(container.getUser()).thenReturn("test"); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); when(container.getLocalizedResources()) .thenReturn(Collections.> emptyMap()); @@ -2562,6 +2562,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { when(container.getLaunchContext()).thenReturn(clc); Credentials credentials = mock(Credentials.class); when(container.getCredentials()).thenReturn(credentials); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); doAnswer(new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { @@ -2662,4 +2663,94 @@ private String readStringFromPath(Path 
p) throws IOException { return new String(bytes); } } + + @Test(timeout = 20000) + public void testExpandNmAdmEnv() throws Exception { + // setup mocks + Dispatcher dispatcher = mock(Dispatcher.class); + EventHandler handler = mock(EventHandler.class); + when(dispatcher.getEventHandler()).thenReturn(handler); + ContainerExecutor containerExecutor = mock(ContainerExecutor.class); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + DataOutputStream dos = (DataOutputStream) args[0]; + dos.writeBytes("script"); + return null; + } + }).when(containerExecutor).writeLaunchEnv( + any(), any(), any(), any(), any(), any(), any()); + Application app = mock(Application.class); + ApplicationId appId = mock(ApplicationId.class); + when(appId.toString()).thenReturn("1"); + when(app.getAppId()).thenReturn(appId); + Container container = mock(Container.class); + ContainerId id = mock(ContainerId.class); + when(id.toString()).thenReturn("1"); + when(container.getContainerId()).thenReturn(id); + when(container.getUser()).thenReturn("user"); + ContainerLaunchContext clc = mock(ContainerLaunchContext.class); + when(clc.getCommands()).thenReturn(Lists.newArrayList()); + when(container.getLaunchContext()).thenReturn(clc); + Credentials credentials = mock(Credentials.class); + when(container.getCredentials()).thenReturn(credentials); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); + + // Define user environment variables. + Map userSetEnv = new HashMap(); + String userVar = "USER_VAR"; + String userVarVal = "user-var-value"; + userSetEnv.put(userVar, userVarVal); + when(clc.getEnvironment()).thenReturn(userSetEnv); + + YarnConfiguration localConf = new YarnConfiguration(conf); + + // Admin Env var that depends on USER_VAR1 + String testKey1 = "TEST_KEY1"; + String testVal1 = "relies on {{USER_VAR}}"; + localConf.set( + YarnConfiguration.NM_ADMIN_USER_ENV + "." + testKey1, testVal1); + String testVal1Expanded; // this is what we expect after {{}} expansion + if (Shell.WINDOWS) { + testVal1Expanded = "relies on %USER_VAR%"; + } else { + testVal1Expanded = "relies on $USER_VAR"; + } + // Another Admin Env var that depends on the first one + String testKey2 = "TEST_KEY2"; + String testVal2 = "relies on {{TEST_KEY1}}"; + localConf.set( + YarnConfiguration.NM_ADMIN_USER_ENV + "." 
+ testKey2, testVal2); + String testVal2Expanded; // this is what we expect after {{}} expansion + if (Shell.WINDOWS) { + testVal2Expanded = "relies on %TEST_KEY1%"; + } else { + testVal2Expanded = "relies on $TEST_KEY1"; + } + + // call containerLaunch + ContainerLaunch containerLaunch = new ContainerLaunch( + distContext, localConf, dispatcher, + containerExecutor, app, container, dirsHandler, containerManager); + containerLaunch.call(); + + // verify the nmPrivate paths and files + ArgumentCaptor cscArgument = + ArgumentCaptor.forClass(ContainerStartContext.class); + verify(containerExecutor, times(1)).launchContainer(cscArgument.capture()); + ContainerStartContext csc = cscArgument.getValue(); + Assert.assertEquals("script", + readStringFromPath(csc.getNmPrivateContainerScriptPath())); + + // verify env + ArgumentCaptor envArgument = ArgumentCaptor.forClass(Map.class); + verify(containerExecutor, times(1)).writeLaunchEnv(any(), + envArgument.capture(), any(), any(), any(), any(), any()); + Map env = envArgument.getValue(); + Assert.assertEquals(userVarVal, env.get(userVar)); + Assert.assertEquals(testVal1Expanded, env.get(testKey1)); + Assert.assertEquals(testVal2Expanded, env.get(testKey2)); + } + } From 85700f2c5f9fb728fa87b88d7117e5a590430732 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 5 Mar 2021 23:13:35 +0100 Subject: [PATCH 062/666] YARN-10672. All testcases in TestReservations are flaky. Contributed By Szilard Nemeth. --- .../scheduler/capacity/TestReservations.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index a9cee2ddcbd07..1168f648024b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -105,7 +105,6 @@ public void setUp() throws Exception { CapacityScheduler spyCs = new CapacityScheduler(); cs = spy(spyCs); rmContext = TestUtils.getMockRMContext(); - } private void setup(CapacitySchedulerConfiguration csConf) throws Exception { @@ -114,6 +113,9 @@ private void setup(CapacitySchedulerConfiguration csConf) throws Exception { private void setup(CapacitySchedulerConfiguration csConf, boolean addUserLimits) throws Exception { + //All stub calls on the spy object of the 'cs' field should happen + //before cs.start() is invoked. See YARN-10672 for more details. 
+ when(cs.getNumClusterNodes()).thenReturn(3); csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true); final String newRoot = "root" + System.currentTimeMillis(); @@ -156,8 +158,6 @@ private void setup(CapacitySchedulerConfiguration csConf, cs.setRMContext(spyRMContext); cs.init(csConf); cs.start(); - - when(cs.getNumClusterNodes()).thenReturn(3); } private static final String A = "a"; From 8a44b527772d19fbc4c21383be45c1a6ab0ad9a3 Mon Sep 17 00:00:00 2001 From: Haoze Wu <18595686+functioner@users.noreply.github.com> Date: Sat, 6 Mar 2021 08:26:16 -0500 Subject: [PATCH 063/666] HADOOP-17552. Change ipc.client.rpc-timeout.ms from 0 to 120000 by default to avoid potential hang. (#2727) --- .../java/org/apache/hadoop/fs/CommonConfigurationKeys.java | 2 +- .../hadoop-common/src/main/resources/core-default.xml | 5 +++-- .../src/test/java/org/apache/hadoop/ipc/TestIPC.java | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index c08af395ad2f9..07776763e97a4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -58,7 +58,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { public static final String IPC_CLIENT_RPC_TIMEOUT_KEY = "ipc.client.rpc-timeout.ms"; /** Default value for IPC_CLIENT_RPC_TIMEOUT_KEY. */ - public static final int IPC_CLIENT_RPC_TIMEOUT_DEFAULT = 0; + public static final int IPC_CLIENT_RPC_TIMEOUT_DEFAULT = 120000; /** Responses larger than this will be logged */ public static final String IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY = "ipc.server.max.response.size"; diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index c4edc5d614ed7..11b790408b79d 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -2383,9 +2383,10 @@ ipc.client.rpc-timeout.ms - 0 + 120000 Timeout on waiting response from server, in milliseconds. - If ipc.client.ping is set to true and this rpc-timeout is greater than + If this rpc-timeout is 0, it means no timeout. If this rpc-timeout is greater + than 0, and ipc.client.ping is set to true, and this rpc-timeout is greater than the value of ipc.ping.interval, the effective value of the rpc-timeout is rounded up to multiple of ipc.ping.interval. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index 32881523fded0..d486c7ea13102 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -1456,6 +1456,7 @@ public void run() { @Test public void testClientGetTimeout() throws IOException { Configuration config = new Configuration(); + config.setInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY, 0); assertThat(Client.getTimeout(config)).isEqualTo(-1); } From c741aa7c8d0fc8300405db8dc555472848a9b9c9 Mon Sep 17 00:00:00 2001 From: tomscut Date: Mon, 1 Mar 2021 16:35:12 -0800 Subject: [PATCH 064/666] HDFS-15808. Add metrics for FSNamesystem read/write lock hold long time. (#2668) Contributed by tomscut. --- .../hdfs/server/namenode/FSNamesystem.java | 14 ++++++++ .../server/namenode/FSNamesystemLock.java | 34 +++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 22b4b92f447bb..ff03d7b0522fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4830,6 +4830,20 @@ public int getFsLockQueueLength() { return fsLock.getQueueLength(); } + @Metric(value = {"ReadLockLongHoldCount", "The number of time " + + "the read lock has been held for longer than the threshold"}, + type = Metric.Type.COUNTER) + public long getNumOfReadLockLongHold() { + return fsLock.getNumOfReadLockLongHold(); + } + + @Metric(value = {"WriteLockLongHoldCount", "The number of time " + + "the write lock has been held for longer than the threshold"}, + type = Metric.Type.COUNTER) + public long getNumOfWriteLockLongHold() { + return fsLock.getNumOfWriteLockLongHold(); + } + int getNumberOfDatanodes(DatanodeReportType type) { readLock(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index c03cfd50756b4..842c6b3f2df59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -109,6 +109,16 @@ public Long initialValue() { private final AtomicReference longestReadLockHeldInfo = new AtomicReference<>(new LockHeldInfo()); private LockHeldInfo longestWriteLockHeldInfo = new LockHeldInfo(); + /** + * The number of time the read lock + * has been held longer than the threshold. + */ + private final AtomicLong numReadLockLongHold = new AtomicLong(0); + /** + * The number of time the write lock + * has been held for longer than the threshold. 
+ */ + private final AtomicLong numWriteLockLongHold = new AtomicLong(0); @VisibleForTesting static final String OP_NAME_OTHER = "OTHER"; @@ -182,6 +192,7 @@ public void readUnlock(String opName, final long readLockIntervalMs = TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos); if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) { + numReadLockLongHold.incrementAndGet(); String lockReportInfo = null; boolean done = false; while (!done) { @@ -298,6 +309,7 @@ private void writeUnlock(String opName, boolean suppressWriteLockReport, LogAction logAction = LogThrottlingHelper.DO_NOT_LOG; if (needReport && writeLockIntervalMs >= this.writeLockReportingThresholdMs) { + numWriteLockLongHold.incrementAndGet(); if (longestWriteLockHeldInfo.getIntervalMs() <= writeLockIntervalMs) { String lockReportInfo = lockReportInfoSupplier != null ? " (" + lockReportInfoSupplier.get() + ")" : ""; @@ -362,6 +374,28 @@ public int getQueueLength() { return coarseLock.getQueueLength(); } + /** + * Returns the number of time the read lock + * has been held longer than the threshold. + * + * @return long - Number of time the read lock + * has been held longer than the threshold + */ + public long getNumOfReadLockLongHold() { + return numReadLockLongHold.get(); + } + + /** + * Returns the number of time the write lock + * has been held longer than the threshold. + * + * @return long - Number of time the write lock + * has been held longer than the threshold. + */ + public long getNumOfWriteLockLongHold() { + return numWriteLockLongHold.get(); + } + /** * Add the lock hold time for a recent operation to the metrics. * @param operationName Name of the operation for which to record the time From 4bcd9bd20d6cd5ad3c6b220e11edc24263d20cb3 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Mon, 8 Mar 2021 12:18:46 +0800 Subject: [PATCH 065/666] HDFS-15384. Document getLocatedBlocks(String src, long start) of DFSClient only return partial blocks. Contributed by Yang Yun. --- .../main/java/org/apache/hadoop/hdfs/DFSClient.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index c2b03cdaad79c..f568308aff8f6 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -861,6 +861,18 @@ public long getRefreshReadBlkLocationsInterval() { return dfsClientConf.getRefreshReadBlockLocationsMS(); } + /** + * Get locations of the blocks of the specified file `src` from offset + * `start` within the prefetch size which is related to parameter + * `dfs.client.read.prefetch.size`. DataNode locations for each block are + * sorted by the proximity to the client. Please note that the prefetch size + * is not equal file length generally. + * + * @param src the file path. + * @param start starting offset. + * @return LocatedBlocks + * @throws IOException + */ public LocatedBlocks getLocatedBlocks(String src, long start) throws IOException { return getLocatedBlocks(src, start, dfsClientConf.getPrefetchSize()); From 896d8c05d56631deb5ced6c3a6d826ae906d2f70 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Mon, 8 Mar 2021 12:29:44 +0100 Subject: [PATCH 066/666] YARN-10658. CapacityScheduler QueueInfo add queue path field to avoid ambiguous QueueName. Contributed by Qi Zhu. 
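Because auto-created queues may reuse a leaf name under different parents, the queue path added here is the value callers should display or compare rather than the bare queue name. A hypothetical client-side sketch (YarnClient.getQueueInfo already exists; only the class and the queue name "b" are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.QueueInfo;
    import org.apache.hadoop.yarn.client.api.YarnClient;

    // Illustrative only: resolve an ambiguous leaf name to its full
    // hierarchy using the queuePath field introduced by this patch.
    public class QueuePathExample {
      public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new Configuration());
        yarnClient.start();
        try {
          QueueInfo info = yarnClient.getQueueInfo("b");
          // getQueueName() may be just "b"; getQueuePath() is unambiguous,
          // e.g. "root.a.b.b" for a dynamically created queue.
          System.out.println(info.getQueueName() + " -> " + info.getQueuePath());
        } finally {
          yarnClient.stop();
        }
      }
    }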
--- .../hadoop/yarn/api/records/QueueInfo.java | 26 +++++++++++--- .../src/main/proto/yarn_protos.proto | 1 + .../hadoop/yarn/client/cli/QueueCLI.java | 2 ++ .../yarn/client/ProtocolHATestBase.java | 2 +- .../hadoop/yarn/client/cli/TestYarnCLI.java | 10 ++++-- .../api/records/impl/pb/QueueInfoPBImpl.java | 16 +++++++++ .../hadoop/yarn/api/TestPBImplRecords.java | 3 +- .../scheduler/capacity/AbstractCSQueue.java | 1 + .../TestConfigurationMutationACLPolicies.java | 17 +++++---- .../TestSchedulerApplicationAttempt.java | 3 +- .../capacity/TestCapacityScheduler.java | 6 ++++ ...CapacitySchedulerNewQueueAutoCreation.java | 35 +++++++++++++++++++ .../scheduler/capacity/TestLeafQueue.java | 15 ++++---- 13 files changed, 114 insertions(+), 23 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java index 803adad002bb0..eb703cdb2ffa9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java @@ -54,7 +54,8 @@ public abstract class QueueInfo { @Private @Unstable - public static QueueInfo newInstance(String queueName, float capacity, + public static QueueInfo newInstance(String queueName, + String queuePath, float capacity, float maximumCapacity, float currentCapacity, List childQueues, List applications, QueueState queueState, Set accessibleNodeLabels, @@ -62,6 +63,7 @@ public static QueueInfo newInstance(String queueName, float capacity, boolean preemptionDisabled, float weight) { QueueInfo queueInfo = Records.newRecord(QueueInfo.class); queueInfo.setQueueName(queueName); + queueInfo.setQueuePath(queuePath); queueInfo.setCapacity(capacity); queueInfo.setMaximumCapacity(maximumCapacity); queueInfo.setCurrentCapacity(currentCapacity); @@ -78,14 +80,15 @@ public static QueueInfo newInstance(String queueName, float capacity, @Private @Unstable - public static QueueInfo newInstance(String queueName, float capacity, + public static QueueInfo newInstance(String queueName, + String queuePath, float capacity, float maximumCapacity, float currentCapacity, List childQueues, List applications, QueueState queueState, Set accessibleNodeLabels, String defaultNodeLabelExpression, QueueStatistics queueStatistics, boolean preemptionDisabled, float weight, Map queueConfigurations) { - QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity, + QueueInfo queueInfo = QueueInfo.newInstance(queueName, queuePath, capacity, maximumCapacity, currentCapacity, childQueues, applications, queueState, accessibleNodeLabels, @@ -97,7 +100,8 @@ public static QueueInfo newInstance(String queueName, float capacity, @Private @Unstable - public static QueueInfo newInstance(String queueName, float capacity, + public static QueueInfo newInstance(String queueName, + String queuePath, float capacity, float maximumCapacity, float currentCapacity, List childQueues, List applications, QueueState queueState, Set accessibleNodeLabels, @@ -105,7 +109,7 @@ public static QueueInfo newInstance(String queueName, float capacity, boolean preemptionDisabled, float weight, Map queueConfigurations, boolean intraQueuePreemptionDisabled) { - QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity, + QueueInfo queueInfo = QueueInfo.newInstance(queueName, queuePath, 
capacity, maximumCapacity, currentCapacity, childQueues, applications, queueState, accessibleNodeLabels, @@ -126,6 +130,18 @@ public static QueueInfo newInstance(String queueName, float capacity, @Private @Unstable public abstract void setQueueName(String queueName); + + /** + * Get the path of the queue. + * @return path of the queue + */ + @Public + @Stable + public abstract String getQueuePath(); + + @Private + @Unstable + public abstract void setQueuePath(String queuePath); /** * Get the configured capacity of the queue. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 467f26aa11191..0b360dfca5941 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -610,6 +610,7 @@ message QueueInfoProto { repeated QueueConfigurationsMapProto queueConfigurationsMap = 12; optional bool intraQueuePreemptionDisabled = 13; optional float weight = 14; + optional string queuePath = 15; } message QueueConfigurationsProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java index 98f714fed81c0..c3566437b4ab7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java @@ -125,6 +125,8 @@ private int listQueue(String queueName) throws YarnException, IOException { private void printQueueInfo(PrintWriter writer, QueueInfo queueInfo) { writer.print("Queue Name : "); writer.println(queueInfo.getQueueName()); + writer.print("Queue Path : "); + writer.println(queueInfo.getQueuePath()); writer.print("\tState : "); writer.println(queueInfo.getQueueState()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index 8c89cf8832555..3972ca67ad326 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -668,7 +668,7 @@ public List createFakeNodeReports() { } public QueueInfo createFakeQueueInfo() { - return QueueInfo.newInstance("root", 100f, 100f, 50f, null, + return QueueInfo.newInstance("root", "root", 100f, 100f, 50f, null, createFakeAppReports(), QueueState.RUNNING, null, null, null, false, -1.0f, null, false); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 90e2a75c2e102..1ed7dab2b8694 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -1718,7 +1718,9 @@ public void testGetQueueInfo() throws 
Exception { Set nodeLabels = new HashSet(); nodeLabels.add("GPU"); nodeLabels.add("JDK_7"); - QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f, + QueueInfo queueInfo = QueueInfo. + newInstance("queueA", "root.queueA", + 0.4f, 0.8f, 0.5f, null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, -1.0f, null, false); @@ -1730,6 +1732,7 @@ public void testGetQueueInfo() throws Exception { PrintWriter pw = new PrintWriter(baos); pw.println("Queue Information : "); pw.println("Queue Name : " + "queueA"); + pw.println("Queue Path : " + "root.queueA"); pw.println("\tState : " + "RUNNING"); pw.println("\tCapacity : " + "40.00%"); pw.println("\tCurrent Capacity : " + "50.00%"); @@ -1888,7 +1891,9 @@ public void testGetQueueInfoPreemptionDisabled() throws Exception { @Test public void testGetQueueInfoWithEmptyNodeLabel() throws Exception { QueueCLI cli = createAndGetQueueCLI(); - QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f, + QueueInfo queueInfo = QueueInfo. + newInstance("queueA", "root.queueA", + 0.4f, 0.8f, 0.5f, null, null, QueueState.RUNNING, null, null, null, true, -1.0f, null, true); when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo); @@ -1899,6 +1904,7 @@ public void testGetQueueInfoWithEmptyNodeLabel() throws Exception { PrintWriter pw = new PrintWriter(baos); pw.println("Queue Information : "); pw.println("Queue Name : " + "queueA"); + pw.println("Queue Path : " + "root.queueA"); pw.println("\tState : " + "RUNNING"); pw.println("\tCapacity : " + "40.00%"); pw.println("\tCurrent Capacity : " + "50.00%"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java index cb3757bd992dd..c69fddbc4c90a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java @@ -101,6 +101,12 @@ public String getQueueName() { return (p.hasQueueName()) ? p.getQueueName() : null; } + @Override + public String getQueuePath() { + QueueInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasQueuePath()) ? p.getQueuePath() : null; + } + @Override public QueueState getQueueState() { QueueInfoProtoOrBuilder p = viaProto ? 
proto : builder; @@ -166,6 +172,16 @@ public void setQueueName(String queueName) { builder.setQueueName(queueName); } + @Override + public void setQueuePath(String queuePath) { + maybeInitBuilder(); + if (queuePath == null) { + builder.clearQueuePath(); + return; + } + builder.setQueuePath(queuePath); + } + @Override public void setQueueState(QueueState queueState) { maybeInitBuilder(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index 7792cad2a848a..96624d66dee86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -432,7 +432,8 @@ public static void setup() throws Exception { generateByNewInstance(ContainerUpdateResponse.class); // genByNewInstance does not apply to QueueInfo, cause // it is recursive(has sub queues) - typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f, + typeValueCache.put(QueueInfo.class, QueueInfo. + newInstance("root", "root", 1.0f, 1.0f, 0.1f, null, null, QueueState.RUNNING, ImmutableSet.of("x", "y"), "x && y", null, false, -1.0f, null, false)); generateByNewInstance(QueueStatistics.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 7aa8e012df6f3..2d85d618a7d54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -716,6 +716,7 @@ protected QueueInfo getQueueInfo() { // TODO, improve this QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class); queueInfo.setQueueName(queueName); + queueInfo.setQueuePath(queuePath); queueInfo.setAccessibleNodeLabels(accessibleLabels); queueInfo.setCapacity(queueCapacities.getCapacity()); queueInfo.setMaximumCapacity(queueCapacities.getMaximumCapacity()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java index 90e12ac416bd6..1304a88d02da8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java @@ -59,23 +59,26 @@ public void 
setUp() throws IOException { rmContext = mock(RMContext.class); scheduler = mock(MutableConfScheduler.class); when(rmContext.getScheduler()).thenReturn(scheduler); - mockQueue("a", scheduler); - mockQueue("b", scheduler); - mockQueue("b1", scheduler); + mockQueue("a", "root.a", scheduler); + mockQueue("b", "root.b", scheduler); + mockQueue("b1", "root.b1", scheduler); } - private void mockQueue(String queueName, MutableConfScheduler scheduler) + private void mockQueue(String queueName, + String queuePath, MutableConfScheduler confScheduler) throws IOException { - QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null, + QueueInfo queueInfo = QueueInfo. + newInstance(queueName, queuePath, 0, 0, + 0, null, null, null, null, null, null, false, -1.0f, null, false); - when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean())) + when(confScheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean())) .thenReturn(queueInfo); Queue queue = mock(Queue.class); when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(GOOD_USER))) .thenReturn(true); when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(BAD_USER))) .thenReturn(false); - when(scheduler.getQueue(eq(queueName))).thenReturn(queue); + when(confScheduler.getQueue(eq(queueName))).thenReturn(queue); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index 93d52bdb79050..2a8f57956bada 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -216,7 +216,8 @@ private Queue createQueue(String name, Queue parent) { private Queue createQueue(String name, Queue parent, float capacity) { QueueMetrics metrics = QueueMetrics.forQueue(name, parent, false, conf); - QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null, + QueueInfo queueInfo = QueueInfo.newInstance(name, + "root." 
+ name, capacity, 1.0f, 0, null, null, QueueState.RUNNING, null, "", null, false, -1.0f, null, false); ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics); Queue queue = mock(Queue.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index d5d89cae86644..b6c08021b96b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -1146,6 +1146,8 @@ public void testCapacitySchedulerInfo() throws Exception { QueueInfo queueInfo = resourceManager.getResourceScheduler().getQueueInfo("a", true, true); Assert.assertEquals("Queue Name should be a", "a", queueInfo.getQueueName()); + Assert.assertEquals("Queue Path should be root.a", "root.a", + queueInfo.getQueuePath()); Assert.assertEquals("Child Queues size should be 2", 2, queueInfo.getChildQueues().size()); @@ -4362,12 +4364,16 @@ public void testDefaultNodeLabelExpressionQueueConfig() throws Exception { QueueInfo queueInfoA = cs.getQueueInfo("a", true, false); Assert.assertEquals("Queue Name should be a", "a", queueInfoA.getQueueName()); + Assert.assertEquals("Queue Path should be root.a", "root.a", + queueInfoA.getQueuePath()); Assert.assertEquals("Default Node Label Expression should be x", "x", queueInfoA.getDefaultNodeLabelExpression()); QueueInfo queueInfoB = cs.getQueueInfo("b", true, false); Assert.assertEquals("Queue Name should be b", "b", queueInfoB.getQueueName()); + Assert.assertEquals("Queue Path should be root.b", "root.b", + queueInfoB.getQueuePath()); Assert.assertEquals("Default Node Label Expression should be y", "y", queueInfoB.getDefaultNodeLabelExpression()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index 45c411f81c129..f493fe6078af9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -876,6 +876,41 @@ public void testAutoCreateQueueAfterRemoval() throws Exception { b.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); } + @Test + public void testQueueInfoIfAmbiguousQueueNames() throws Exception { + startScheduler(); + + AbstractCSQueue b = (AbstractCSQueue) cs. 
+ getQueue("root.b"); + Assert.assertFalse(b.isDynamicQueue()); + Assert.assertEquals("root.b", + b.getQueueInfo().getQueuePath()); + + createQueue("root.a.b.b"); + + AbstractCSQueue bAutoParent = (AbstractCSQueue) cs. + getQueue("root.a.b"); + Assert.assertTrue(bAutoParent.isDynamicQueue()); + Assert.assertTrue(bAutoParent.hasChildQueues()); + Assert.assertEquals("root.a.b", + bAutoParent.getQueueInfo().getQueuePath()); + + AbstractCSQueue bAutoLeafQueue = + (AbstractCSQueue) cs.getQueue("root.a.b.b"); + Assert.assertTrue(bAutoLeafQueue.isDynamicQueue()); + Assert.assertFalse(bAutoLeafQueue.hasChildQueues()); + Assert.assertEquals("root.a.b.b", + bAutoLeafQueue.getQueueInfo().getQueuePath()); + + // Make sure all queue name are ambiguous + Assert.assertEquals("b", + b.getQueueInfo().getQueueName()); + Assert.assertEquals("b", + bAutoParent.getQueueInfo().getQueueName()); + Assert.assertEquals("b", + bAutoLeafQueue.getQueueInfo().getQueueName()); + } + protected LeafQueue createQueue(String queuePath) throws YarnException { return autoQueueHandler.autoCreateQueue( CSQueueUtils.extractQueuePath(queuePath)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 1a45908ee9170..6262d85598fbb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -4786,7 +4786,7 @@ public void testApplicationQueuePercent() // Queue "test" consumes 100% of the cluster, so its capacity and absolute // capacity are both 1.0f. - Queue queue = createQueue("test", null, 1.0f, 1.0f, res); + Queue queue = createQueue("test", "root.test", null, 1.0f, 1.0f, res); final String user = "user1"; FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue, @@ -4803,7 +4803,7 @@ public void testApplicationQueuePercent() // Queue "test2" is a child of root and its capacity is 50% of root. As a // child of root, its absolute capaicty is also 50%. - queue = createQueue("test2", null, 0.5f, 0.5f, + queue = createQueue("test2", "root.test2", null, 0.5f, 0.5f, Resources.divideAndCeil(dominantResourceCalculator, res, 2)); app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext); @@ -4816,7 +4816,8 @@ public void testApplicationQueuePercent() // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster. // Therefore, "test2.1" capacity is 50% and absolute capacity is 25%. 
- AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f, + AbstractCSQueue qChild = + createQueue("test2.1", "root.test2.1", queue, 0.5f, 0.25f, Resources.divideAndCeil(dominantResourceCalculator, res, 4)); app = new FiCaSchedulerApp(appAttId, user, qChild, qChild.getAbstractUsersManager(), rmContext); @@ -4828,7 +4829,7 @@ public void testApplicationQueuePercent() app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f); // test that queueUsagePercentage returns neither NaN nor Infinite - AbstractCSQueue zeroQueue = createQueue("test2.2", null, + AbstractCSQueue zeroQueue = createQueue("test2.2", "root.test2.2", null, Float.MIN_VALUE, Float.MIN_VALUE, Resources.multiply(res, Float.MIN_VALUE)); app = new FiCaSchedulerApp(appAttId, user, zeroQueue, @@ -4901,10 +4902,12 @@ private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { return attId; } - private AbstractCSQueue createQueue(String name, Queue parent, float capacity, + private AbstractCSQueue + createQueue(String name, String path, Queue parent, float capacity, float absCap, Resource res) { CSQueueMetrics metrics = CSQueueMetrics.forQueue(name, parent, false, cs.getConf()); - QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null, + QueueInfo queueInfo = QueueInfo. + newInstance(name, path, capacity, 1.0f, 0, null, null, QueueState.RUNNING, null, "", null, false, -1.0f, null, false); ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics); AbstractCSQueue queue = mock(AbstractCSQueue.class); From 8e97690ca7623989f25c59382f2fb6528084fecf Mon Sep 17 00:00:00 2001 From: litao Date: Tue, 9 Mar 2021 00:50:51 +0800 Subject: [PATCH 067/666] HDFS-15873. Add namenode address in logs for block report (#2743) --- .../hadoop/hdfs/server/datanode/BPServiceActor.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 0cc9fac8df7a2..3a5436196e834 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -436,7 +436,8 @@ List blockReport(long fullBrLeaseId) throws IOException { final int nCmds = cmds.size(); LOG.info((success ? "S" : "Uns") + "uccessfully sent block report 0x" + - Long.toHexString(reportId) + ", containing " + reports.length + + Long.toHexString(reportId) + " to namenode: " + nnAddr + + ", containing " + reports.length + " storage report(s), of which we sent " + numReportsSent + "." 
+ " The reports had " + totalBlockCount + " total blocks and used " + numRPCs + @@ -930,10 +931,12 @@ void reRegister() throws IOException { void triggerBlockReport(BlockReportOptions options) { if (options.isIncremental()) { - LOG.info(bpos.toString() + ": scheduling an incremental block report."); + LOG.info(bpos.toString() + ": scheduling an incremental block report " + + "to namenode: " + nnAddr + "."); ibrManager.triggerIBR(true); } else { - LOG.info(bpos.toString() + ": scheduling a full block report."); + LOG.info(bpos.toString() + ": scheduling a full block report " + + "to namenode: " + nnAddr + "."); synchronized(ibrManager) { scheduler.forceFullBlockReportNow(); ibrManager.notifyAll(); From 89a56fca71853dcaf7215f7b0db6cb0f5f35643b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 9 Mar 2021 11:10:10 +0900 Subject: [PATCH 068/666] HADOOP-17557. skip-dir option is not processed by Yetus. Contributed by Ahmed Hussein. --- dev-support/bin/test-patch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/bin/test-patch b/dev-support/bin/test-patch index 8ff8119b3e086..5faf472d325e8 100755 --- a/dev-support/bin/test-patch +++ b/dev-support/bin/test-patch @@ -15,4 +15,4 @@ # limitations under the License. BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P) -exec "${BINDIR}/yetus-wrapper" test-patch --project=hadoop --skip-dir=dev-support "$@" +exec "${BINDIR}/yetus-wrapper" test-patch --project=hadoop --skip-dirs=dev-support "$@" From 48bcd0fd080cd4dab12b2d4ac1e6ce62fae152e0 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 11:28:24 +0100 Subject: [PATCH 069/666] YARN-10676. Improve code quality in TestTimelineAuthenticationFilterForV1. Contributed by Szilard Nemeth. --- ...TestTimelineAuthenticationFilterForV1.java | 98 ++++++++++++------- 1 file changed, 62 insertions(+), 36 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java index 0e1310eb69627..cf2db2f8c6fb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.server.timeline.security; -import static org.junit.Assert.assertTrue; - import java.io.File; import java.security.PrivilegedExceptionAction; import java.util.Arrays; @@ -49,12 +47,16 @@ import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore; import org.apache.hadoop.yarn.server.timeline.TimelineStore; import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_HTTP_AUTH_PREFIX; +import static org.junit.Assert.fail; + import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Test cases for authentication via 
TimelineAuthenticationFilter while @@ -62,17 +64,20 @@ */ @RunWith(Parameterized.class) public class TestTimelineAuthenticationFilterForV1 { + private static final Logger LOG = + LoggerFactory.getLogger(TestTimelineAuthenticationFilterForV1.class); private static final String FOO_USER = "foo"; private static final String BAR_USER = "bar"; private static final String HTTP_USER = "HTTP"; + private static final String PRINCIPAL = HTTP_USER + "/localhost"; private static final File TEST_ROOT_DIR = new File( System.getProperty("test.build.dir", "target/test-dir"), TestTimelineAuthenticationFilterForV1.class.getName() + "-root"); - private static File httpSpnegoKeytabFile = new File( + private static final File httpSpnegoKeytabFile = new File( KerberosTestUtils.getKeytabFile()); - private static String httpSpnegoPrincipal = + private static final String httpSpnegoPrincipal = KerberosTestUtils.getServerPrincipal(); private static final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + "/" @@ -100,16 +105,16 @@ public static void setup() { testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR); testMiniKDC.start(); testMiniKDC.createPrincipal( - httpSpnegoKeytabFile, HTTP_USER + "/localhost"); + httpSpnegoKeytabFile, PRINCIPAL); } catch (Exception e) { - assertTrue("Couldn't setup MiniKDC", false); + LOG.error("Failed to setup MiniKDC", e); + fail("Couldn't setup MiniKDC"); } try { testTimelineServer = new ApplicationHistoryServer(); conf = new Configuration(false); - conf.setStrings(TIMELINE_HTTP_AUTH_PREFIX + "type", - "kerberos"); + conf.setStrings(TIMELINE_HTTP_AUTH_PREFIX + "type", "kerberos"); conf.set(TIMELINE_HTTP_AUTH_PREFIX + KerberosAuthenticationHandler.PRINCIPAL, httpSpnegoPrincipal); conf.set(TIMELINE_HTTP_AUTH_PREFIX + @@ -150,8 +155,8 @@ public static void setup() { testTimelineServer.init(conf); testTimelineServer.start(); } catch (Exception e) { - e.printStackTrace(); - assertTrue("Couldn't setup TimelineServer", false); + LOG.error("Failed to setup TimelineServer", e); + fail("Couldn't setup TimelineServer"); } } @@ -181,7 +186,7 @@ public static void tearDown() throws Exception { @Test public void testPutTimelineEntities() throws Exception { - KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable() { + KerberosTestUtils.doAs(PRINCIPAL, new Callable() { @Override public Void call() throws Exception { TimelineClient client = createTimelineClientForUGI(); @@ -191,11 +196,16 @@ public Void call() throws Exception { entityToStore.setEntityId("entity1"); entityToStore.setStartTime(0L); TimelinePutResponse putResponse = client.putEntities(entityToStore); - Assert.assertEquals(0, putResponse.getErrors().size()); + if (putResponse.getErrors().size() > 0) { + LOG.error("putResponse errors: {}", putResponse.getErrors()); + } + Assert.assertTrue("There were some errors in the putResponse", + putResponse.getErrors().isEmpty()); TimelineEntity entityToRead = testTimelineServer.getTimelineStore().getEntity("entity1", TestTimelineAuthenticationFilterForV1.class.getName(), null); - Assert.assertNotNull(entityToRead); + Assert.assertNotNull("Timeline entity should not be null", + entityToRead); return null; } }); @@ -203,7 +213,7 @@ public Void call() throws Exception { @Test public void testPutDomains() throws Exception { - KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable() { + KerberosTestUtils.doAs(PRINCIPAL, new Callable() { @Override public Void call() throws Exception { TimelineClient client = createTimelineClientForUGI(); @@ -216,7 
+226,8 @@ public Void call() throws Exception { TimelineDomain domainToRead = testTimelineServer.getTimelineStore().getDomain( TestTimelineAuthenticationFilterForV1.class.getName()); - Assert.assertNotNull(domainToRead); + Assert.assertNotNull("Timeline domain should not be null", + domainToRead); return null; } }); @@ -225,7 +236,7 @@ public Void call() throws Exception { @Test public void testDelegationTokenOperations() throws Exception { TimelineClient httpUserClient = - KerberosTestUtils.doAs(HTTP_USER + "/localhost", + KerberosTestUtils.doAs(PRINCIPAL, new Callable() { @Override public TimelineClient call() throws Exception { @@ -233,43 +244,51 @@ public TimelineClient call() throws Exception { } }); UserGroupInformation httpUser = - KerberosTestUtils.doAs(HTTP_USER + "/localhost", + KerberosTestUtils.doAs(PRINCIPAL, new Callable() { @Override public UserGroupInformation call() throws Exception { return UserGroupInformation.getCurrentUser(); } }); + // Let HTTP user to get the delegation for itself Token token = httpUserClient.getDelegationToken(httpUser.getShortUserName()); - Assert.assertNotNull(token); + Assert.assertNotNull("Delegation token should not be null", token); TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier(); - Assert.assertNotNull(tDT); - Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner()); + Assert.assertNotNull("Delegation token identifier should not be null", + tDT); + Assert.assertEquals("Owner of delegation token identifier does not match", + new Text(HTTP_USER), tDT.getOwner()); // Renew token - Assert.assertFalse(token.getService().toString().isEmpty()); + Assert.assertFalse("Service field of token should not be empty", + token.getService().toString().isEmpty()); // Renew the token from the token service address long renewTime1 = httpUserClient.renewDelegationToken(token); Thread.sleep(100); token.setService(new Text()); - Assert.assertTrue(token.getService().toString().isEmpty()); - // If the token service address is not avaiable, it still can be renewed + Assert.assertTrue("Service field of token should be empty", + token.getService().toString().isEmpty()); + // If the token service address is not available, it still can be renewed // from the configured address long renewTime2 = httpUserClient.renewDelegationToken(token); - Assert.assertTrue(renewTime1 < renewTime2); + Assert.assertTrue("renewTime2 should be later than renewTime1", + renewTime1 < renewTime2); // Cancel token - Assert.assertTrue(token.getService().toString().isEmpty()); - // If the token service address is not avaiable, it still can be canceled + Assert.assertTrue("Service field of token should be empty", + token.getService().toString().isEmpty()); + // If the token service address is not available, it still can be canceled // from the configured address httpUserClient.cancelDelegationToken(token); // Renew should not be successful because the token is canceled try { httpUserClient.renewDelegationToken(token); - Assert.fail(); + Assert.fail("Renew of delegation token should not be successful"); } catch (Exception e) { + LOG.info("Exception while renewing delegation token", e); Assert.assertTrue(e.getMessage().contains( "Renewal request for unknown token")); } @@ -280,33 +299,39 @@ public UserGroupInformation call() throws Exception { TimelineClient fooUserClient = fooUgi.doAs( new PrivilegedExceptionAction() { @Override - public TimelineClient run() throws Exception { + public TimelineClient run() { return createTimelineClientForUGI(); } }); token = 
fooUserClient.getDelegationToken(httpUser.getShortUserName()); - Assert.assertNotNull(token); + Assert.assertNotNull("Delegation token should not be null", token); tDT = token.decodeIdentifier(); - Assert.assertNotNull(tDT); - Assert.assertEquals(new Text(FOO_USER), tDT.getOwner()); - Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser()); + Assert.assertNotNull("Delegation token identifier should not be null", + tDT); + Assert.assertEquals("Owner of delegation token is not the expected", + new Text(FOO_USER), tDT.getOwner()); + Assert.assertEquals("Real user of delegation token is not the expected", + new Text(HTTP_USER), tDT.getRealUser()); // Renew token as the renewer final Token tokenToRenew = token; renewTime1 = httpUserClient.renewDelegationToken(tokenToRenew); renewTime2 = httpUserClient.renewDelegationToken(tokenToRenew); - Assert.assertTrue(renewTime1 < renewTime2); + Assert.assertTrue("renewTime2 should be later than renewTime1", + renewTime1 < renewTime2); // Cancel token - Assert.assertFalse(tokenToRenew.getService().toString().isEmpty()); + Assert.assertFalse("Service field of token should not be empty", + tokenToRenew.getService().toString().isEmpty()); // Cancel the token from the token service address fooUserClient.cancelDelegationToken(tokenToRenew); // Renew should not be successful because the token is canceled try { httpUserClient.renewDelegationToken(tokenToRenew); - Assert.fail(); + Assert.fail("Renew of delegation token should not be successful"); } catch (Exception e) { + LOG.info("Exception while renewing delegation token", e); Assert.assertTrue( e.getMessage().contains("Renewal request for unknown token")); } @@ -324,8 +349,9 @@ public TimelineClient run() { try { barUserClient.getDelegationToken(httpUser.getShortUserName()); - Assert.fail(); + Assert.fail("Retrieval of delegation token should not be successful"); } catch (Exception e) { + LOG.info("Exception while retrieving delegation token", e); Assert.assertTrue(e.getCause() instanceof AuthorizationException || e.getCause() instanceof AuthenticationException); } From 3c2fd1a61e3bb9245c8301b47b5f8f9be5813476 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 11:34:37 +0100 Subject: [PATCH 070/666] YARN-10675. Consolidate YARN-10672 and YARN-10447. Contributed by Szilard Nemeth. 
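Part of this consolidation is the ordering rule spelled out in the TestLeafQueue hunk below: every Mockito stub on the spied scheduler must be registered before cs.start(), because threads launched by start() can invoke the real method while a when(...) stubbing is still in flight. A generic, hedged illustration of the pattern (ClusterService is a made-up stand-in, not a YARN class):

    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.when;

    public class StubBeforeStartExample {
      // Stand-in for a service whose start() spawns threads that call
      // getNumClusterNodes(); purely illustrative.
      static class ClusterService {
        public int getNumClusterNodes() { return 0; }
        public void start() { /* would launch worker threads here */ }
      }

      public static void main(String[] args) {
        ClusterService service = spy(new ClusterService());
        // Stub first: once start() has spawned threads, a concurrent
        // when(...) call can race with real invocations and corrupt
        // Mockito's stubbing state or return the unstubbed value.
        when(service.getNumClusterNodes()).thenReturn(3);
        service.start();
      }
    }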
--- .../scheduler/capacity/CapacityScheduler.java | 20 +++---------------- .../scheduler/capacity/TestLeafQueue.java | 7 +++++-- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 467dacbfbb472..d259c93a1f454 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -249,8 +249,6 @@ public Configuration getConf() { private CSMaxRunningAppsEnforcer maxRunningEnforcer; - private boolean activitiesManagerEnabled = true; - public CapacityScheduler() { super(CapacityScheduler.class.getName()); this.maxRunningEnforcer = new CSMaxRunningAppsEnforcer(this); @@ -351,9 +349,7 @@ void initScheduler(Configuration configuration) throws this.workflowPriorityMappingsMgr = new WorkflowPriorityMappingsManager(); this.activitiesManager = new ActivitiesManager(rmContext); - if (activitiesManagerEnabled) { - activitiesManager.init(conf); - } + activitiesManager.init(conf); initializeQueues(this.conf); this.isLazyPreemptionEnabled = conf.getLazyPreemptionEnabled(); @@ -411,9 +407,7 @@ void initScheduler(Configuration configuration) throws private void startSchedulerThreads() { writeLock.lock(); try { - if (activitiesManagerEnabled) { - activitiesManager.start(); - } + activitiesManager.start(); if (scheduleAsynchronously) { Preconditions.checkNotNull(asyncSchedulerThreads, "asyncSchedulerThreads is null"); @@ -447,9 +441,7 @@ public void serviceStart() throws Exception { public void serviceStop() throws Exception { writeLock.lock(); try { - if (activitiesManagerEnabled) { - this.activitiesManager.stop(); - } + this.activitiesManager.stop(); if (scheduleAsynchronously && asyncSchedulerThreads != null) { for (Thread t : asyncSchedulerThreads) { t.interrupt(); @@ -3480,7 +3472,6 @@ public void setMaxRunningAppsEnforcer(CSMaxRunningAppsEnforcer enforcer) { this.maxRunningEnforcer = enforcer; } - /** * Returning true as capacity scheduler supports placement constraints. 
*/ @@ -3489,11 +3480,6 @@ public boolean placementConstraintEnabled() { return true; } - @VisibleForTesting - public void setActivitiesManagerEnabled(boolean enabled) { - this.activitiesManagerEnabled = enabled; - } - @VisibleForTesting public void setQueueManager(CapacitySchedulerQueueManager qm) { this.queueManager = qm; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 6262d85598fbb..4b023107b39cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -162,9 +162,13 @@ private void setUpWithNodeLabels() throws Exception { private void setUpInternal(ResourceCalculator rC, boolean withNodeLabels) throws Exception { CapacityScheduler spyCs = new CapacityScheduler(); - spyCs.setActivitiesManagerEnabled(false); queues = new CSQueueStore(); cs = spy(spyCs); + + //All stub calls on the spy object of the 'cs' field should happen + //before cs.start() is invoked. See YARN-10672 for more details. + when(cs.getNumClusterNodes()).thenReturn(3); + rmContext = TestUtils.getMockRMContext(); spyRMContext = spy(rmContext); @@ -231,7 +235,6 @@ private void setUpInternal(ResourceCalculator rC, boolean withNodeLabels) when(spyRMContext.getScheduler()).thenReturn(cs); when(spyRMContext.getYarnConfiguration()) .thenReturn(new YarnConfiguration()); - when(cs.getNumClusterNodes()).thenReturn(3); cs.start(); } From cd5fe5f78649febfadd503545b66288e82de3e51 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 12:03:53 +0100 Subject: [PATCH 071/666] YARN-10678. Try blocks without catch blocks in SLS scheduler classes can swallow other exceptions. Contributed by Szilard Nemeth. 
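The hazard being fixed is a try/finally with no catch: an exception from the guarded call escapes without any SLS-side logging, and it can even be replaced outright if the finally block throws too, so the patch logs and rethrows before the finally runs. A stripped-down sketch of the shape being adopted (doWork() and recordMetrics() are placeholders, not real SLS methods):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogAndRethrowExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogAndRethrowExample.class);

      void allocateLike() {
        try {
          doWork();
        } catch (Exception e) {
          // Without this block the failure would only surface, if at all,
          // far from the scheduler code that actually threw it.
          LOG.error("Caught exception from allocate", e);
          throw e;
        } finally {
          recordMetrics();
        }
      }

      private void doWork() { }
      private void recordMetrics() { }
    }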
--- .../apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java | 3 +++ .../org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java | 3 +++ 2 files changed, 6 insertions(+) diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java index f34a3d3fc655c..a3182042f8476 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java @@ -119,6 +119,9 @@ public Allocation allocate(ApplicationAttemptId attemptId, containerIds, strings, strings2, updateRequests); return allocation; + } catch (Exception e) { + LOG.error("Caught exception from allocate", e); + throw e; } finally { context.stop(); schedulerMetrics.increaseSchedulerAllocationCounter(); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java index 09ef2cf903585..c8324368444e1 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java @@ -111,6 +111,9 @@ public Allocation allocate(ApplicationAttemptId attemptId, schedulingRequests, containerIds, blacklistAdditions, blacklistRemovals, updateRequests); return allocation; + } catch (Exception e) { + LOG.error("Caught exception from allocate", e); + throw e; } finally { context.stop(); schedulerMetrics.increaseSchedulerAllocationCounter(); From 44e0c5596aec6448e642b03a7b37c409be312bd7 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 12:53:32 +0100 Subject: [PATCH 072/666] YARN-10677. Logger of SLSFairScheduler is provided with the wrong class. Contributed by Szilard Nemeth. --- .../org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java index c8324368444e1..7298c056edad9 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java @@ -66,7 +66,8 @@ public class SLSFairScheduler extends FairScheduler new ConcurrentHashMap<>(); // logger - private static final Logger LOG = LoggerFactory.getLogger(SLSCapacityScheduler.class); + private static final Logger LOG = + LoggerFactory.getLogger(SLSFairScheduler.class); public SchedulerMetrics getSchedulerMetrics() { return schedulerMetrics; From 96599105bea2671bf04ebe76908d1b46bfbf4b55 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 13:22:48 +0100 Subject: [PATCH 073/666] YARN-10681. Fix assertion failure message in BaseSLSRunnerTest. Contributed by Szilard Nemeth. 
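The YARN-10677 hunk above is a reminder that class-literal loggers are easy to copy incorrectly between classes. A defensive idiom that some codebases use instead (not applied by this patch, shown only as an aside) derives the logger name from the enclosing class at load time:

    import java.lang.invoke.MethodHandles;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerIdiomExample {
      // lookupClass() always resolves to the declaring class, so a
      // copy-pasted declaration can never point at the wrong logger.
      private static final Logger LOG =
          LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

      public static void main(String[] args) {
        LOG.info("logger is named after the declaring class automatically");
      }
    }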
--- .../test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java index bfbd592c42b58..1bb4710d56d16 100644 --- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java @@ -126,7 +126,7 @@ public void uncaughtException(Thread t, Throwable e) { if (!exceptionList.isEmpty()) { sls.stop(); - Assert.fail("TestSLSRunner catched exception from child thread " + Assert.fail("TestSLSRunner caught exception from child thread " + "(TaskRunner.TaskDefinition): " + exceptionList); break; } From 15d17fc4fa6d41d4aa11d93869c7e9f26d09d574 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 14:02:12 +0100 Subject: [PATCH 074/666] YARN-10679. Better logging of uncaught exceptions throughout SLS. Contributed by Szilard Nemeth. --- .../hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java | 7 +++---- .../hadoop/yarn/sls/scheduler/SLSFairScheduler.java | 6 +++--- .../org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java | 6 +++++- .../java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java | 8 ++++++-- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java index a3182042f8476..e0cb15138f451 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java @@ -63,7 +63,6 @@ @Unstable public class SLSCapacityScheduler extends CapacityScheduler implements SchedulerWrapper,Configurable { - private Configuration conf; private Map appQueueMap = @@ -99,7 +98,7 @@ public void setConf(Configuration conf) { CapacityScheduler.class); schedulerMetrics.init(this, conf); } catch (Exception e) { - e.printStackTrace(); + LOG.error("Caught exception while initializing schedulerMetrics", e); } } } @@ -129,7 +128,7 @@ public Allocation allocate(ApplicationAttemptId attemptId, updateQueueWithAllocateRequest(allocation, attemptId, resourceRequests, containerIds); } catch (IOException e) { - e.printStackTrace(); + LOG.error("Caught exception while executing finally block", e); } } } else { @@ -376,7 +375,7 @@ public void serviceStop() throws Exception { schedulerMetrics.tearDown(); } } catch (Exception e) { - e.printStackTrace(); + LOG.error("Caught exception while stopping service", e); } super.serviceStop(); } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java index 7298c056edad9..84549bc492205 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java @@ -92,7 +92,7 @@ public void setConf(Configuration conf) { FairScheduler.class); schedulerMetrics.init(this, conf); } catch (Exception e) { - e.printStackTrace(); + LOG.error("Caught exception while initializing schedulerMetrics", e); } } } @@ 
-122,7 +122,7 @@ public Allocation allocate(ApplicationAttemptId attemptId, updateQueueWithAllocateRequest(allocation, attemptId, resourceRequests, containerIds); } catch (IOException e) { - e.printStackTrace(); + LOG.error("Caught exception while executing finally block", e); } } } else { @@ -332,7 +332,7 @@ public void serviceStop() throws Exception { schedulerMetrics.tearDown(); } } catch (Exception e) { - e.printStackTrace(); + LOG.error("Caught exception while stopping service", e); } super.serviceStop(); } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java index 19cfe88d1abbc..167845522b022 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java @@ -26,10 +26,14 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Private @Unstable public class TaskRunner { + private static final Logger LOG = LoggerFactory.getLogger(TaskRunner.class); + @Private @Unstable public abstract static class Task implements Runnable, Delayed { @@ -98,7 +102,7 @@ public final void run() { lastStep(); } } catch (Exception e) { - e.printStackTrace(); + LOG.error("Caught exception while executing Taskrunner", e); Thread.getDefaultUncaughtExceptionHandler() .uncaughtException(Thread.currentThread(), e); } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java index b3b7a8240383d..191c462404eb2 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java @@ -50,10 +50,14 @@ import com.codahale.metrics.Gauge; import com.codahale.metrics.Histogram; import com.codahale.metrics.MetricRegistry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Private @Unstable public class SLSWebApp extends HttpServlet { + private static final Logger LOG = LoggerFactory.getLogger(SLSWebApp.class); + private static final long serialVersionUID = 1905162041950251407L; private transient Server server; private transient SchedulerWrapper wrapper; @@ -101,7 +105,7 @@ public class SLSWebApp extends HttpServlet { trackTemplate = IOUtils.toString( cl.getResourceAsStream("html/track.html.template"), StandardCharsets.UTF_8); } catch (IOException e) { - e.printStackTrace(); + LOG.error("Caught exception while initializing templates", e); } } @@ -166,7 +170,7 @@ public void handle(String target, Request baseRequest, printJsonTrack(request, response); } } catch (Exception e) { - e.printStackTrace(); + LOG.error("Caught exception while starting SLSWebApp", e); } } }; From 40181251424c1bc83bacbbbbcc7a53293c4254e0 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 14:28:23 +0100 Subject: [PATCH 075/666] YARN-9615. Add dispatcher metrics to RM. Contributed by Qi Zhu. 
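The plumbing below lets a dispatcher time each handled event by event type: AsyncDispatcher.addMetrics(...) and EventDispatcher.setMetrics(...) take an EventTypeMetrics keyed by the event-type enum class, and the dispatcher calls increment(type, elapsedTime) around every handler invocation. A minimal, hypothetical implementation of that contract (the real ResourceManager wiring uses GenericEventTypeMetrics; DemoEventType and this class are made up):

    import java.util.EnumMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.yarn.metrics.EventTypeMetrics;

    public class CountingEventTypeMetrics
        implements EventTypeMetrics<CountingEventTypeMetrics.DemoEventType> {

      // Made-up event type, purely for illustration.
      public enum DemoEventType { APP_ADDED, APP_REMOVED }

      private final Map<DemoEventType, AtomicLong> totalTimeUs =
          new EnumMap<>(DemoEventType.class);

      public CountingEventTypeMetrics() {
        for (DemoEventType type : DemoEventType.values()) {
          totalTimeUs.put(type, new AtomicLong(0));
        }
      }

      @Override
      public void increment(DemoEventType type, long processingTimeUs) {
        // Accumulate the per-type handler time reported by the dispatcher.
        totalTimeUs.get(type).addAndGet(processingTimeUs);
      }

      @Override
      public long get(DemoEventType type) {
        return totalTimeUs.get(type).get();
      }

      @Override
      public void getMetrics(MetricsCollector collector, boolean all) {
        // A real implementation would publish gauges here, as
        // GenericEventTypeMetrics does; the sketch keeps it a no-op.
      }
    }

Such an instance would then be registered along the lines of dispatcher.addMetrics(new CountingEventTypeMetrics(), DemoEventType.class), which is roughly what the ResourceManager changes in this patch do through GenericEventTypeMetricsManager.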
--- .../hadoop/yarn/event/AsyncDispatcher.java | 26 ++- .../hadoop/yarn/event/EventDispatcher.java | 20 +- .../yarn/metrics/DisableEventTypeMetrics.java | 42 ++++ .../hadoop/yarn/metrics/EventTypeMetrics.java | 32 +++ .../yarn/metrics/GenericEventTypeMetrics.java | 160 +++++++++++++++ .../yarn/event/TestAsyncDispatcher.java | 182 +++++++++++++++++- .../GenericEventTypeMetricsManager.java | 42 ++++ .../resourcemanager/ResourceManager.java | 18 +- .../resourcemanager/TestResourceManager.java | 2 + .../scheduler/TestSchedulerHealth.java | 2 + .../capacity/TestCapacityScheduler.java | 1 + 11 files changed, 521 insertions(+), 6 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index f9deab06ff27f..667515d00c104 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -26,6 +26,9 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import org.apache.hadoop.yarn.metrics.EventTypeMetrics; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.MonotonicClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -85,6 +88,11 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher { protected final Map, EventHandler> eventDispatchers; private boolean exitOnDispatchException = true; + private Map, + EventTypeMetrics> eventTypeMetricsMap; + + private Clock clock = new MonotonicClock(); + /** * The thread name for dispatcher. */ @@ -98,6 +106,8 @@ public AsyncDispatcher(BlockingQueue eventQueue) { super("Dispatcher"); this.eventQueue = eventQueue; this.eventDispatchers = new HashMap, EventHandler>(); + this.eventTypeMetricsMap = new HashMap, + EventTypeMetrics>(); } /** @@ -135,7 +145,16 @@ public void run() { return; } if (event != null) { - dispatch(event); + if (eventTypeMetricsMap. 
+ get(event.getType().getDeclaringClass()) != null) { + long startTime = clock.getTime(); + dispatch(event); + eventTypeMetricsMap.get(event.getType().getDeclaringClass()) + .increment(event.getType(), + clock.getTime() - startTime); + } else { + dispatch(event); + } if (printTrigger) { //Log the latest dispatch event type // may cause the too many events queued @@ -369,4 +388,9 @@ protected boolean isDrained() { protected boolean isStopped() { return stopped; } + + public void addMetrics(EventTypeMetrics metrics, + Class eventClass) { + eventTypeMetricsMap.put(eventClass, metrics); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index 0969e999718e1..f51bae63dc6ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -19,6 +19,9 @@ package org.apache.hadoop.yarn.event; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.metrics.EventTypeMetrics; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.MonotonicClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -47,12 +50,15 @@ public class EventDispatcher extends private final Thread eventProcessor; private volatile boolean stopped = false; private boolean shouldExitOnError = true; + private EventTypeMetrics metrics; private static final Logger LOG = LoggerFactory.getLogger(EventDispatcher.class); private static final Marker FATAL = MarkerFactory.getMarker("FATAL"); + private Clock clock = new MonotonicClock(); + private final class EventProcessor implements Runnable { @Override public void run() { @@ -68,7 +74,14 @@ public void run() { } try { - handler.handle(event); + if (metrics != null) { + long startTime = clock.getTime(); + handler.handle(event); + metrics.increment(event.getType(), + clock.getTime() - startTime); + } else { + handler.handle(event); + } } catch (Throwable t) { // An error occurred, but we are shutting down anyway. // If it was an InterruptedException, the very act of @@ -136,4 +149,9 @@ public void handle(T event) { public void disableExitOnError() { shouldExitOnError = false; } + + public void setMetrics(EventTypeMetrics metrics) { + this.metrics = metrics; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java new file mode 100644 index 0000000000000..7b4af0c3e094d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.annotation.Metrics; + +@InterfaceAudience.Private +@Metrics(context="yarn") +public class DisableEventTypeMetrics implements EventTypeMetrics { + @Override + public void increment(Enum type, long processingTimeUs) { + //nop + return; + } + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + //nop + return; + } + + @Override + public long get(Enum type) { + return 0; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java new file mode 100644 index 0000000000000..7a7e4f5489073 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.annotation.Metrics; + +@InterfaceAudience.Private +@Metrics(context="yarn") +public interface EventTypeMetrics> + extends MetricsSource { + + void increment(T type, long processingTimeUs); + + long get(T type); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java new file mode 100644 index 0000000000000..464edb2778248 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.EnumMap; + +@InterfaceAudience.Private +@Metrics(context="yarn") +public class GenericEventTypeMetrics> + implements EventTypeMetrics { + + static final Logger LOG = + LoggerFactory.getLogger(GenericEventTypeMetrics.class); + + private final EnumMap eventCountMetrics; + private final EnumMap processingTimeMetrics; + private final MetricsRegistry registry; + private final MetricsSystem ms; + private final MetricsInfo info; + private final Class enumClass; + + private boolean isInitialized = false; + + public GenericEventTypeMetrics(MetricsInfo info, MetricsSystem ms, + final T[] enums, Class enumClass) { + this.enumClass = enumClass; + this.eventCountMetrics = new EnumMap<>(this.enumClass); + this.processingTimeMetrics = new EnumMap<>(this.enumClass); + this.ms = ms; + this.info = info; + this.registry = new MetricsRegistry(this.info); + + //Initialize enum + for (final T type : enums) { + String eventCountMetricsName = + type.toString() + "_" + "event_count"; + String processingTimeMetricsName = + type.toString() + "_" + "processing_time"; + eventCountMetrics.put(type, this.registry. + newGauge(eventCountMetricsName, eventCountMetricsName, 0L)); + processingTimeMetrics.put(type, this.registry. 
+ newGauge(processingTimeMetricsName, processingTimeMetricsName, 0L)); + } + } + + public synchronized GenericEventTypeMetrics registerMetrics() { + if (!isInitialized) { + // Register with the MetricsSystems + if (this.ms != null) { + LOG.info("Registering GenericEventTypeMetrics"); + ms.register(info.name(), + info.description(), this); + isInitialized = true; + } + } + return this; + } + + @Override + public void increment(T type, long processingTimeUs) { + if (eventCountMetrics.get(type) != null) { + eventCountMetrics.get(type).incr(); + processingTimeMetrics.get(type).incr(processingTimeUs); + } + } + + @Override + public long get(T type) { + return eventCountMetrics.get(type).value(); + } + + public long getTotalProcessingTime(T type) { + return processingTimeMetrics.get(type).value(); + } + + public EnumMap getEventCountMetrics() { + return eventCountMetrics; + } + + public EnumMap getProcessingTimeMetrics() { + return processingTimeMetrics; + } + + public MetricsRegistry getRegistry() { + return registry; + } + + public MetricsInfo getInfo() { + return info; + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + registry.snapshot(collector.addRecord(registry.info()), all); + } + + public Class getEnumClass() { + return enumClass; + } + + /** Builder class for GenericEventTypeMetrics. */ + public static class EventTypeMetricsBuilder>{ + public EventTypeMetricsBuilder() { + } + + public EventTypeMetricsBuilder setEnumClass(Class enumClassValue) { + this.enumClass = enumClassValue; + return this; + } + + public EventTypeMetricsBuilder setEnums(T[] enumsValue) { + this.enums = enumsValue.clone(); + return this; + } + + public EventTypeMetricsBuilder setInfo(MetricsInfo infoValue) { + this.info = infoValue; + return this; + } + + public EventTypeMetricsBuilder setMs(MetricsSystem msValue) { + this.ms = msValue; + return this; + } + + public GenericEventTypeMetrics build() { + return new GenericEventTypeMetrics(info, ms, enums, enumClass); + } + + private MetricsSystem ms; + private MetricsInfo info; + private Class enumClass; + private T[] enums; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java index 55ddd12fce98e..7d2572a4c118f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java @@ -20,9 +20,20 @@ import java.lang.reflect.Field; import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; + +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -30,6 +41,7 @@ import org.junit.Assert; import org.junit.Test; +import static 
org.apache.hadoop.metrics2.lib.Interns.info; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.*; @@ -118,7 +130,7 @@ public void handle(Event event) { } private enum TestEnum { - TestEventType + TestEventType, TestEventType2 } @SuppressWarnings({ "rawtypes", "unchecked" }) @@ -230,5 +242,171 @@ public void testPrintDispatcherEventDetailsAvoidDeadLoopInternal() } } -} + @Test + public void testMetricsForDispatcher() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + AsyncDispatcher dispatcher = null; + + try { + dispatcher = new AsyncDispatcher("RM Event dispatcher"); + + GenericEventTypeMetrics genericEventTypeMetrics = + new GenericEventTypeMetrics.EventTypeMetricsBuilder() + .setMs(DefaultMetricsSystem.instance()) + .setInfo(info("GenericEventTypeMetrics for " + + TestEnum.class.getName(), + "Metrics for " + dispatcher.getName())) + .setEnumClass(TestEnum.class) + .setEnums(TestEnum.class.getEnumConstants()) + .build().registerMetrics(); + + // We can the metrics enabled for TestEnum + dispatcher.addMetrics(genericEventTypeMetrics, + genericEventTypeMetrics.getEnumClass()); + dispatcher.init(conf); + + // Register handler + dispatcher.register(TestEnum.class, new TestHandler()); + dispatcher.start(); + + for (int i = 0; i < 3; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType); + dispatcher.getEventHandler().handle(event); + } + + for (int i = 0; i < 2; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType2); + dispatcher.getEventHandler().handle(event); + } + + // Check event type count. + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType) == 3, 1000, 10000); + + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType2) == 2, 1000, 10000); + + // Check time spend. + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType) + >= 1500*3); + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType) + < 1500*4); + + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType2) + >= 1500*2); + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType2) + < 1500*3); + + // Make sure metrics consistent. + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + get(TestEnum.TestEventType)), + genericEventTypeMetrics. + getRegistry().get("TestEventType_event_count").toString()); + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + get(TestEnum.TestEventType2)), + genericEventTypeMetrics. + getRegistry().get("TestEventType2_event_count").toString()); + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType)), + genericEventTypeMetrics. + getRegistry().get("TestEventType_processing_time").toString()); + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType2)), + genericEventTypeMetrics. 
+ getRegistry().get("TestEventType2_processing_time").toString()); + + } finally { + dispatcher.close(); + } + + } + + @Test + public void testDispatcherMetricsHistogram() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + AsyncDispatcher dispatcher = null; + + try { + dispatcher = new AsyncDispatcher("RM Event dispatcher"); + + GenericEventTypeMetrics genericEventTypeMetrics = + new GenericEventTypeMetrics.EventTypeMetricsBuilder() + .setMs(DefaultMetricsSystem.instance()) + .setInfo(info("GenericEventTypeMetrics for " + + TestEnum.class.getName(), + "Metrics for " + dispatcher.getName())) + .setEnumClass(TestEnum.class) + .setEnums(TestEnum.class.getEnumConstants()) + .build().registerMetrics(); + + // We can the metrics enabled for TestEnum + dispatcher.addMetrics(genericEventTypeMetrics, + genericEventTypeMetrics.getEnumClass()); + dispatcher.init(conf); + + // Register handler + dispatcher.register(TestEnum.class, new TestHandler()); + dispatcher.start(); + + for (int i = 0; i < 3; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType); + dispatcher.getEventHandler().handle(event); + } + + for (int i = 0; i < 2; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType2); + dispatcher.getEventHandler().handle(event); + } + + // Check event type count. + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType) == 3, 1000, 10000); + + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType2) == 2, 1000, 10000); + + // submit actual values + Map expectedValues = new HashMap<>(); + expectedValues.put("TestEventType_event_count", + genericEventTypeMetrics.get(TestEnum.TestEventType)); + expectedValues.put("TestEventType_processing_time", + genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType)); + expectedValues.put("TestEventType2_event_count", + genericEventTypeMetrics.get(TestEnum.TestEventType2)); + expectedValues.put("TestEventType2_processing_time", + genericEventTypeMetrics. 
+ getTotalProcessingTime(TestEnum.TestEventType2)); + Set testResults = new HashSet<>(); + MetricsCollectorImpl collector = new MetricsCollectorImpl(); + genericEventTypeMetrics.getMetrics(collector, true); + + for (MetricsRecord record : collector.getRecords()) { + for (AbstractMetric metric : record.metrics()) { + String metricName = metric.name(); + if (expectedValues.containsKey(metricName)) { + Long expectedValue = expectedValues.get(metricName); + Assert.assertEquals( + "Metric " + metricName + " doesn't have expected value", + expectedValue, metric.value()); + testResults.add(metricName); + } + } + } + Assert.assertEquals(expectedValues.keySet(), testResults); + + } finally { + dispatcher.close(); + } + + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java new file mode 100644 index 0000000000000..8fda9b7f38ac5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager; + +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; + +import static org.apache.hadoop.metrics2.lib.Interns.info; + +public final class GenericEventTypeMetricsManager { + + private GenericEventTypeMetricsManager() { + // nothing to do + } + + // Construct a GenericEventTypeMetrics for dispatcher + public static > GenericEventTypeMetrics + create(String dispatcherName, Class eventTypeClass) { + return new GenericEventTypeMetrics.EventTypeMetricsBuilder() + .setMs(DefaultMetricsSystem.instance()) + .setInfo(info("GenericEventTypeMetrics for " + eventTypeClass.getName(), + "Metrics for " + dispatcherName)) + .setEnumClass(eventTypeClass) + .setEnums(eventTypeClass.getEnumConstants()) + .build().registerMetrics(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index c315b33541557..6d2a9fed08b91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -21,6 +21,7 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.spi.container.servlet.ServletContainer; +import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -62,6 +63,7 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventDispatcher; import org.apache.hadoop.yarn.event.EventHandler; + import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; @@ -449,11 +451,23 @@ protected void setRMStateStore(RMStateStore rmStore) { } protected EventHandler createSchedulerEventDispatcher() { - return new EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); + EventDispatcher dispatcher = new + EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); + dispatcher. + setMetrics(GenericEventTypeMetricsManager. + create(dispatcher.getName(), SchedulerEventType.class)); + return dispatcher; } protected Dispatcher createDispatcher() { - return new AsyncDispatcher("RM Event dispatcher"); + AsyncDispatcher dispatcher = new AsyncDispatcher("RM Event dispatcher"); + GenericEventTypeMetrics genericEventTypeMetrics = + GenericEventTypeMetricsManager. 
+ create(dispatcher.getName(), NodesListManagerEventType.class); + // We can add more + dispatcher.addMetrics(genericEventTypeMetrics, + genericEventTypeMetrics.getEnumClass()); + return dispatcher; } protected ResourceScheduler createScheduler() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index 1cb5e1d0e7633..b9c5500a7d20b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.lib.StaticUserWebFilter; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.UserGroupInformation; @@ -73,6 +74,7 @@ public class TestResourceManager { public void setUp() throws Exception { YarnConfiguration conf = new YarnConfiguration(); UserGroupInformation.setConfiguration(conf); + DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager(); resourceManager.init(conf); resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java index a75be7745fb88..933eaf91fe4f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -52,6 +53,7 @@ public class TestSchedulerHealth { private ResourceManager resourceManager; public void setup() { + DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 
b6c08021b96b9..d8dc4dc61a9e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -212,6 +212,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { @Before public void setUp() throws Exception { ResourceUtils.resetResourceTypes(new Configuration()); + DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { From 105dfd57f0e3d175b75a0f3052505dc6b043e2df Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 14:32:02 +0100 Subject: [PATCH 076/666] Revert "YARN-9615. Add dispatcher metrics to RM. Contributed by Qi Zhu." This reverts commit 369f75b7a7f62ba454cc2a102ded6c2c947e80e5. --- .../hadoop/yarn/event/AsyncDispatcher.java | 26 +-- .../hadoop/yarn/event/EventDispatcher.java | 20 +- .../yarn/metrics/DisableEventTypeMetrics.java | 42 ---- .../hadoop/yarn/metrics/EventTypeMetrics.java | 32 --- .../yarn/metrics/GenericEventTypeMetrics.java | 160 --------------- .../yarn/event/TestAsyncDispatcher.java | 182 +----------------- .../GenericEventTypeMetricsManager.java | 42 ---- .../resourcemanager/ResourceManager.java | 18 +- .../resourcemanager/TestResourceManager.java | 2 - .../scheduler/TestSchedulerHealth.java | 2 - .../capacity/TestCapacityScheduler.java | 1 - 11 files changed, 6 insertions(+), 521 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index 667515d00c104..f9deab06ff27f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -26,9 +26,6 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import org.apache.hadoop.yarn.metrics.EventTypeMetrics; -import org.apache.hadoop.yarn.util.Clock; -import org.apache.hadoop.yarn.util.MonotonicClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -88,11 +85,6 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher { protected final Map, EventHandler> eventDispatchers; private boolean exitOnDispatchException = true; - private Map, - EventTypeMetrics> eventTypeMetricsMap; - - private Clock clock = new MonotonicClock(); - /** * The thread name for dispatcher. 
*/ @@ -106,8 +98,6 @@ public AsyncDispatcher(BlockingQueue eventQueue) { super("Dispatcher"); this.eventQueue = eventQueue; this.eventDispatchers = new HashMap, EventHandler>(); - this.eventTypeMetricsMap = new HashMap, - EventTypeMetrics>(); } /** @@ -145,16 +135,7 @@ public void run() { return; } if (event != null) { - if (eventTypeMetricsMap. - get(event.getType().getDeclaringClass()) != null) { - long startTime = clock.getTime(); - dispatch(event); - eventTypeMetricsMap.get(event.getType().getDeclaringClass()) - .increment(event.getType(), - clock.getTime() - startTime); - } else { - dispatch(event); - } + dispatch(event); if (printTrigger) { //Log the latest dispatch event type // may cause the too many events queued @@ -388,9 +369,4 @@ protected boolean isDrained() { protected boolean isStopped() { return stopped; } - - public void addMetrics(EventTypeMetrics metrics, - Class eventClass) { - eventTypeMetricsMap.put(eventClass, metrics); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index f51bae63dc6ec..0969e999718e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -19,9 +19,6 @@ package org.apache.hadoop.yarn.event; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.yarn.metrics.EventTypeMetrics; -import org.apache.hadoop.yarn.util.Clock; -import org.apache.hadoop.yarn.util.MonotonicClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -50,15 +47,12 @@ public class EventDispatcher extends private final Thread eventProcessor; private volatile boolean stopped = false; private boolean shouldExitOnError = true; - private EventTypeMetrics metrics; private static final Logger LOG = LoggerFactory.getLogger(EventDispatcher.class); private static final Marker FATAL = MarkerFactory.getMarker("FATAL"); - private Clock clock = new MonotonicClock(); - private final class EventProcessor implements Runnable { @Override public void run() { @@ -74,14 +68,7 @@ public void run() { } try { - if (metrics != null) { - long startTime = clock.getTime(); - handler.handle(event); - metrics.increment(event.getType(), - clock.getTime() - startTime); - } else { - handler.handle(event); - } + handler.handle(event); } catch (Throwable t) { // An error occurred, but we are shutting down anyway. 
// If it was an InterruptedException, the very act of @@ -149,9 +136,4 @@ public void handle(T event) { public void disableExitOnError() { shouldExitOnError = false; } - - public void setMetrics(EventTypeMetrics metrics) { - this.metrics = metrics; - } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java deleted file mode 100644 index 7b4af0c3e094d..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.metrics; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.annotation.Metrics; - -@InterfaceAudience.Private -@Metrics(context="yarn") -public class DisableEventTypeMetrics implements EventTypeMetrics { - @Override - public void increment(Enum type, long processingTimeUs) { - //nop - return; - } - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - //nop - return; - } - - @Override - public long get(Enum type) { - return 0; - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java deleted file mode 100644 index 7a7e4f5489073..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.metrics; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.annotation.Metrics; - -@InterfaceAudience.Private -@Metrics(context="yarn") -public interface EventTypeMetrics> - extends MetricsSource { - - void increment(T type, long processingTimeUs); - - long get(T type); -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java deleted file mode 100644 index 464edb2778248..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.metrics; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.EnumMap; - -@InterfaceAudience.Private -@Metrics(context="yarn") -public class GenericEventTypeMetrics> - implements EventTypeMetrics { - - static final Logger LOG = - LoggerFactory.getLogger(GenericEventTypeMetrics.class); - - private final EnumMap eventCountMetrics; - private final EnumMap processingTimeMetrics; - private final MetricsRegistry registry; - private final MetricsSystem ms; - private final MetricsInfo info; - private final Class enumClass; - - private boolean isInitialized = false; - - public GenericEventTypeMetrics(MetricsInfo info, MetricsSystem ms, - final T[] enums, Class enumClass) { - this.enumClass = enumClass; - this.eventCountMetrics = new EnumMap<>(this.enumClass); - this.processingTimeMetrics = new EnumMap<>(this.enumClass); - this.ms = ms; - this.info = info; - this.registry = new MetricsRegistry(this.info); - - //Initialize enum - for (final T type : enums) { - String eventCountMetricsName = - type.toString() + "_" + "event_count"; - String processingTimeMetricsName = - type.toString() + "_" + "processing_time"; - eventCountMetrics.put(type, this.registry. - newGauge(eventCountMetricsName, eventCountMetricsName, 0L)); - processingTimeMetrics.put(type, this.registry. 
- newGauge(processingTimeMetricsName, processingTimeMetricsName, 0L)); - } - } - - public synchronized GenericEventTypeMetrics registerMetrics() { - if (!isInitialized) { - // Register with the MetricsSystems - if (this.ms != null) { - LOG.info("Registering GenericEventTypeMetrics"); - ms.register(info.name(), - info.description(), this); - isInitialized = true; - } - } - return this; - } - - @Override - public void increment(T type, long processingTimeUs) { - if (eventCountMetrics.get(type) != null) { - eventCountMetrics.get(type).incr(); - processingTimeMetrics.get(type).incr(processingTimeUs); - } - } - - @Override - public long get(T type) { - return eventCountMetrics.get(type).value(); - } - - public long getTotalProcessingTime(T type) { - return processingTimeMetrics.get(type).value(); - } - - public EnumMap getEventCountMetrics() { - return eventCountMetrics; - } - - public EnumMap getProcessingTimeMetrics() { - return processingTimeMetrics; - } - - public MetricsRegistry getRegistry() { - return registry; - } - - public MetricsInfo getInfo() { - return info; - } - - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - registry.snapshot(collector.addRecord(registry.info()), all); - } - - public Class getEnumClass() { - return enumClass; - } - - /** Builder class for GenericEventTypeMetrics. */ - public static class EventTypeMetricsBuilder>{ - public EventTypeMetricsBuilder() { - } - - public EventTypeMetricsBuilder setEnumClass(Class enumClassValue) { - this.enumClass = enumClassValue; - return this; - } - - public EventTypeMetricsBuilder setEnums(T[] enumsValue) { - this.enums = enumsValue.clone(); - return this; - } - - public EventTypeMetricsBuilder setInfo(MetricsInfo infoValue) { - this.info = infoValue; - return this; - } - - public EventTypeMetricsBuilder setMs(MetricsSystem msValue) { - this.ms = msValue; - return this; - } - - public GenericEventTypeMetrics build() { - return new GenericEventTypeMetrics(info, ms, enums, enumClass); - } - - private MetricsSystem ms; - private MetricsInfo info; - private Class enumClass; - private T[] enums; - } -} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java index 7d2572a4c118f..55ddd12fce98e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java @@ -20,20 +20,9 @@ import java.lang.reflect.Field; import java.lang.reflect.Modifier; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; - -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsRecord; -import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -41,7 +30,6 @@ import org.junit.Assert; import org.junit.Test; -import static 
org.apache.hadoop.metrics2.lib.Interns.info; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.*; @@ -130,7 +118,7 @@ public void handle(Event event) { } private enum TestEnum { - TestEventType, TestEventType2 + TestEventType } @SuppressWarnings({ "rawtypes", "unchecked" }) @@ -242,171 +230,5 @@ public void testPrintDispatcherEventDetailsAvoidDeadLoopInternal() } } - @Test - public void testMetricsForDispatcher() throws Exception { - YarnConfiguration conf = new YarnConfiguration(); - AsyncDispatcher dispatcher = null; - - try { - dispatcher = new AsyncDispatcher("RM Event dispatcher"); - - GenericEventTypeMetrics genericEventTypeMetrics = - new GenericEventTypeMetrics.EventTypeMetricsBuilder() - .setMs(DefaultMetricsSystem.instance()) - .setInfo(info("GenericEventTypeMetrics for " - + TestEnum.class.getName(), - "Metrics for " + dispatcher.getName())) - .setEnumClass(TestEnum.class) - .setEnums(TestEnum.class.getEnumConstants()) - .build().registerMetrics(); - - // We can the metrics enabled for TestEnum - dispatcher.addMetrics(genericEventTypeMetrics, - genericEventTypeMetrics.getEnumClass()); - dispatcher.init(conf); - - // Register handler - dispatcher.register(TestEnum.class, new TestHandler()); - dispatcher.start(); - - for (int i = 0; i < 3; ++i) { - Event event = mock(Event.class); - when(event.getType()).thenReturn(TestEnum.TestEventType); - dispatcher.getEventHandler().handle(event); - } - - for (int i = 0; i < 2; ++i) { - Event event = mock(Event.class); - when(event.getType()).thenReturn(TestEnum.TestEventType2); - dispatcher.getEventHandler().handle(event); - } - - // Check event type count. - GenericTestUtils.waitFor(() -> genericEventTypeMetrics. - get(TestEnum.TestEventType) == 3, 1000, 10000); - - GenericTestUtils.waitFor(() -> genericEventTypeMetrics. - get(TestEnum.TestEventType2) == 2, 1000, 10000); - - // Check time spend. - Assert.assertTrue(genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType) - >= 1500*3); - Assert.assertTrue(genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType) - < 1500*4); - - Assert.assertTrue(genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType2) - >= 1500*2); - Assert.assertTrue(genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType2) - < 1500*3); - - // Make sure metrics consistent. - Assert.assertEquals(Long.toString(genericEventTypeMetrics. - get(TestEnum.TestEventType)), - genericEventTypeMetrics. - getRegistry().get("TestEventType_event_count").toString()); - Assert.assertEquals(Long.toString(genericEventTypeMetrics. - get(TestEnum.TestEventType2)), - genericEventTypeMetrics. - getRegistry().get("TestEventType2_event_count").toString()); - Assert.assertEquals(Long.toString(genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType)), - genericEventTypeMetrics. - getRegistry().get("TestEventType_processing_time").toString()); - Assert.assertEquals(Long.toString(genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType2)), - genericEventTypeMetrics. 
- getRegistry().get("TestEventType2_processing_time").toString()); - - } finally { - dispatcher.close(); - } - - } - - @Test - public void testDispatcherMetricsHistogram() throws Exception { - YarnConfiguration conf = new YarnConfiguration(); - AsyncDispatcher dispatcher = null; - - try { - dispatcher = new AsyncDispatcher("RM Event dispatcher"); - - GenericEventTypeMetrics genericEventTypeMetrics = - new GenericEventTypeMetrics.EventTypeMetricsBuilder() - .setMs(DefaultMetricsSystem.instance()) - .setInfo(info("GenericEventTypeMetrics for " - + TestEnum.class.getName(), - "Metrics for " + dispatcher.getName())) - .setEnumClass(TestEnum.class) - .setEnums(TestEnum.class.getEnumConstants()) - .build().registerMetrics(); - - // We can the metrics enabled for TestEnum - dispatcher.addMetrics(genericEventTypeMetrics, - genericEventTypeMetrics.getEnumClass()); - dispatcher.init(conf); - - // Register handler - dispatcher.register(TestEnum.class, new TestHandler()); - dispatcher.start(); - - for (int i = 0; i < 3; ++i) { - Event event = mock(Event.class); - when(event.getType()).thenReturn(TestEnum.TestEventType); - dispatcher.getEventHandler().handle(event); - } - - for (int i = 0; i < 2; ++i) { - Event event = mock(Event.class); - when(event.getType()).thenReturn(TestEnum.TestEventType2); - dispatcher.getEventHandler().handle(event); - } - - // Check event type count. - GenericTestUtils.waitFor(() -> genericEventTypeMetrics. - get(TestEnum.TestEventType) == 3, 1000, 10000); - - GenericTestUtils.waitFor(() -> genericEventTypeMetrics. - get(TestEnum.TestEventType2) == 2, 1000, 10000); - - // submit actual values - Map expectedValues = new HashMap<>(); - expectedValues.put("TestEventType_event_count", - genericEventTypeMetrics.get(TestEnum.TestEventType)); - expectedValues.put("TestEventType_processing_time", - genericEventTypeMetrics. - getTotalProcessingTime(TestEnum.TestEventType)); - expectedValues.put("TestEventType2_event_count", - genericEventTypeMetrics.get(TestEnum.TestEventType2)); - expectedValues.put("TestEventType2_processing_time", - genericEventTypeMetrics. 
- getTotalProcessingTime(TestEnum.TestEventType2)); - Set testResults = new HashSet<>(); +} - MetricsCollectorImpl collector = new MetricsCollectorImpl(); - genericEventTypeMetrics.getMetrics(collector, true); - - for (MetricsRecord record : collector.getRecords()) { - for (AbstractMetric metric : record.metrics()) { - String metricName = metric.name(); - if (expectedValues.containsKey(metricName)) { - Long expectedValue = expectedValues.get(metricName); - Assert.assertEquals( - "Metric " + metricName + " doesn't have expected value", - expectedValue, metric.value()); - testResults.add(metricName); - } - } - } - Assert.assertEquals(expectedValues.keySet(), testResults); - - } finally { - dispatcher.close(); - } - - } -} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java deleted file mode 100644 index 8fda9b7f38ac5..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.resourcemanager; - -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; - -import static org.apache.hadoop.metrics2.lib.Interns.info; - -public final class GenericEventTypeMetricsManager { - - private GenericEventTypeMetricsManager() { - // nothing to do - } - - // Construct a GenericEventTypeMetrics for dispatcher - public static > GenericEventTypeMetrics - create(String dispatcherName, Class eventTypeClass) { - return new GenericEventTypeMetrics.EventTypeMetricsBuilder() - .setMs(DefaultMetricsSystem.instance()) - .setInfo(info("GenericEventTypeMetrics for " + eventTypeClass.getName(), - "Metrics for " + dispatcherName)) - .setEnumClass(eventTypeClass) - .setEnums(eventTypeClass.getEnumConstants()) - .build().registerMetrics(); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 6d2a9fed08b91..c315b33541557 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -21,7 +21,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.spi.container.servlet.ServletContainer; -import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -63,7 +62,6 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventDispatcher; import org.apache.hadoop.yarn.event.EventHandler; - import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; @@ -451,23 +449,11 @@ protected void setRMStateStore(RMStateStore rmStore) { } protected EventHandler createSchedulerEventDispatcher() { - EventDispatcher dispatcher = new - EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); - dispatcher. - setMetrics(GenericEventTypeMetricsManager. - create(dispatcher.getName(), SchedulerEventType.class)); - return dispatcher; + return new EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); } protected Dispatcher createDispatcher() { - AsyncDispatcher dispatcher = new AsyncDispatcher("RM Event dispatcher"); - GenericEventTypeMetrics genericEventTypeMetrics = - GenericEventTypeMetricsManager. 
- create(dispatcher.getName(), NodesListManagerEventType.class); - // We can add more - dispatcher.addMetrics(genericEventTypeMetrics, - genericEventTypeMetrics.getEnumClass()); - return dispatcher; + return new AsyncDispatcher("RM Event dispatcher"); } protected ResourceScheduler createScheduler() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index b9c5500a7d20b..1cb5e1d0e7633 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.lib.StaticUserWebFilter; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.UserGroupInformation; @@ -74,7 +73,6 @@ public class TestResourceManager { public void setUp() throws Exception { YarnConfiguration conf = new YarnConfiguration(); UserGroupInformation.setConfiguration(conf); - DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager(); resourceManager.init(conf); resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java index 933eaf91fe4f2..a75be7745fb88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -53,7 +52,6 @@ public class TestSchedulerHealth { private ResourceManager resourceManager; public void setup() { - DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index d8dc4dc61a9e8..b6c08021b96b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -212,7 +212,6 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { @Before public void setUp() throws Exception { ResourceUtils.resetResourceTypes(new Configuration()); - DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { From c2fd94a93005595c1140f42b30a265792c2b4b17 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 9 Mar 2021 14:33:14 +0100 Subject: [PATCH 077/666] YARN-9615. Add dispatcher metrics to RM. Contributed by Jonathan Hung and Qi Zhu. --- .../hadoop/yarn/event/AsyncDispatcher.java | 26 ++- .../hadoop/yarn/event/EventDispatcher.java | 20 +- .../yarn/metrics/DisableEventTypeMetrics.java | 42 ++++ .../hadoop/yarn/metrics/EventTypeMetrics.java | 32 +++ .../yarn/metrics/GenericEventTypeMetrics.java | 160 +++++++++++++++ .../yarn/event/TestAsyncDispatcher.java | 182 +++++++++++++++++- .../GenericEventTypeMetricsManager.java | 42 ++++ .../resourcemanager/ResourceManager.java | 18 +- .../resourcemanager/TestResourceManager.java | 2 + .../scheduler/TestSchedulerHealth.java | 2 + .../capacity/TestCapacityScheduler.java | 1 + 11 files changed, 521 insertions(+), 6 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index f9deab06ff27f..667515d00c104 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -26,6 +26,9 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import org.apache.hadoop.yarn.metrics.EventTypeMetrics; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.MonotonicClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -85,6 +88,11 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher { protected final Map, EventHandler> eventDispatchers; private boolean exitOnDispatchException = 
true; + private Map, + EventTypeMetrics> eventTypeMetricsMap; + + private Clock clock = new MonotonicClock(); + /** * The thread name for dispatcher. */ @@ -98,6 +106,8 @@ public AsyncDispatcher(BlockingQueue eventQueue) { super("Dispatcher"); this.eventQueue = eventQueue; this.eventDispatchers = new HashMap, EventHandler>(); + this.eventTypeMetricsMap = new HashMap, + EventTypeMetrics>(); } /** @@ -135,7 +145,16 @@ public void run() { return; } if (event != null) { - dispatch(event); + if (eventTypeMetricsMap. + get(event.getType().getDeclaringClass()) != null) { + long startTime = clock.getTime(); + dispatch(event); + eventTypeMetricsMap.get(event.getType().getDeclaringClass()) + .increment(event.getType(), + clock.getTime() - startTime); + } else { + dispatch(event); + } if (printTrigger) { //Log the latest dispatch event type // may cause the too many events queued @@ -369,4 +388,9 @@ protected boolean isDrained() { protected boolean isStopped() { return stopped; } + + public void addMetrics(EventTypeMetrics metrics, + Class eventClass) { + eventTypeMetricsMap.put(eventClass, metrics); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index 0969e999718e1..f51bae63dc6ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -19,6 +19,9 @@ package org.apache.hadoop.yarn.event; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.metrics.EventTypeMetrics; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.MonotonicClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -47,12 +50,15 @@ public class EventDispatcher extends private final Thread eventProcessor; private volatile boolean stopped = false; private boolean shouldExitOnError = true; + private EventTypeMetrics metrics; private static final Logger LOG = LoggerFactory.getLogger(EventDispatcher.class); private static final Marker FATAL = MarkerFactory.getMarker("FATAL"); + private Clock clock = new MonotonicClock(); + private final class EventProcessor implements Runnable { @Override public void run() { @@ -68,7 +74,14 @@ public void run() { } try { - handler.handle(event); + if (metrics != null) { + long startTime = clock.getTime(); + handler.handle(event); + metrics.increment(event.getType(), + clock.getTime() - startTime); + } else { + handler.handle(event); + } } catch (Throwable t) { // An error occurred, but we are shutting down anyway. 
// If it was an InterruptedException, the very act of @@ -136,4 +149,9 @@ public void handle(T event) { public void disableExitOnError() { shouldExitOnError = false; } + + public void setMetrics(EventTypeMetrics metrics) { + this.metrics = metrics; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java new file mode 100644 index 0000000000000..7b4af0c3e094d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/DisableEventTypeMetrics.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.annotation.Metrics; + +@InterfaceAudience.Private +@Metrics(context="yarn") +public class DisableEventTypeMetrics implements EventTypeMetrics { + @Override + public void increment(Enum type, long processingTimeUs) { + //nop + return; + } + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + //nop + return; + } + + @Override + public long get(Enum type) { + return 0; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java new file mode 100644 index 0000000000000..7a7e4f5489073 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/EventTypeMetrics.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.annotation.Metrics; + +@InterfaceAudience.Private +@Metrics(context="yarn") +public interface EventTypeMetrics> + extends MetricsSource { + + void increment(T type, long processingTimeUs); + + long get(T type); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java new file mode 100644 index 0000000000000..464edb2778248 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/GenericEventTypeMetrics.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.EnumMap; + +@InterfaceAudience.Private +@Metrics(context="yarn") +public class GenericEventTypeMetrics> + implements EventTypeMetrics { + + static final Logger LOG = + LoggerFactory.getLogger(GenericEventTypeMetrics.class); + + private final EnumMap eventCountMetrics; + private final EnumMap processingTimeMetrics; + private final MetricsRegistry registry; + private final MetricsSystem ms; + private final MetricsInfo info; + private final Class enumClass; + + private boolean isInitialized = false; + + public GenericEventTypeMetrics(MetricsInfo info, MetricsSystem ms, + final T[] enums, Class enumClass) { + this.enumClass = enumClass; + this.eventCountMetrics = new EnumMap<>(this.enumClass); + this.processingTimeMetrics = new EnumMap<>(this.enumClass); + this.ms = ms; + this.info = info; + this.registry = new MetricsRegistry(this.info); + + //Initialize enum + for (final T type : enums) { + String eventCountMetricsName = + type.toString() + "_" + "event_count"; + String processingTimeMetricsName = + type.toString() + "_" + "processing_time"; + eventCountMetrics.put(type, this.registry. + newGauge(eventCountMetricsName, eventCountMetricsName, 0L)); + processingTimeMetrics.put(type, this.registry. 
+ newGauge(processingTimeMetricsName, processingTimeMetricsName, 0L)); + } + } + + public synchronized GenericEventTypeMetrics registerMetrics() { + if (!isInitialized) { + // Register with the MetricsSystems + if (this.ms != null) { + LOG.info("Registering GenericEventTypeMetrics"); + ms.register(info.name(), + info.description(), this); + isInitialized = true; + } + } + return this; + } + + @Override + public void increment(T type, long processingTimeUs) { + if (eventCountMetrics.get(type) != null) { + eventCountMetrics.get(type).incr(); + processingTimeMetrics.get(type).incr(processingTimeUs); + } + } + + @Override + public long get(T type) { + return eventCountMetrics.get(type).value(); + } + + public long getTotalProcessingTime(T type) { + return processingTimeMetrics.get(type).value(); + } + + public EnumMap getEventCountMetrics() { + return eventCountMetrics; + } + + public EnumMap getProcessingTimeMetrics() { + return processingTimeMetrics; + } + + public MetricsRegistry getRegistry() { + return registry; + } + + public MetricsInfo getInfo() { + return info; + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + registry.snapshot(collector.addRecord(registry.info()), all); + } + + public Class getEnumClass() { + return enumClass; + } + + /** Builder class for GenericEventTypeMetrics. */ + public static class EventTypeMetricsBuilder>{ + public EventTypeMetricsBuilder() { + } + + public EventTypeMetricsBuilder setEnumClass(Class enumClassValue) { + this.enumClass = enumClassValue; + return this; + } + + public EventTypeMetricsBuilder setEnums(T[] enumsValue) { + this.enums = enumsValue.clone(); + return this; + } + + public EventTypeMetricsBuilder setInfo(MetricsInfo infoValue) { + this.info = infoValue; + return this; + } + + public EventTypeMetricsBuilder setMs(MetricsSystem msValue) { + this.ms = msValue; + return this; + } + + public GenericEventTypeMetrics build() { + return new GenericEventTypeMetrics(info, ms, enums, enumClass); + } + + private MetricsSystem ms; + private MetricsInfo info; + private Class enumClass; + private T[] enums; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java index 55ddd12fce98e..7d2572a4c118f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java @@ -20,9 +20,20 @@ import java.lang.reflect.Field; import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; + +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -30,6 +41,7 @@ import org.junit.Assert; import org.junit.Test; +import static 
org.apache.hadoop.metrics2.lib.Interns.info; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.*; @@ -118,7 +130,7 @@ public void handle(Event event) { } private enum TestEnum { - TestEventType + TestEventType, TestEventType2 } @SuppressWarnings({ "rawtypes", "unchecked" }) @@ -230,5 +242,171 @@ public void testPrintDispatcherEventDetailsAvoidDeadLoopInternal() } } -} + @Test + public void testMetricsForDispatcher() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + AsyncDispatcher dispatcher = null; + + try { + dispatcher = new AsyncDispatcher("RM Event dispatcher"); + + GenericEventTypeMetrics genericEventTypeMetrics = + new GenericEventTypeMetrics.EventTypeMetricsBuilder() + .setMs(DefaultMetricsSystem.instance()) + .setInfo(info("GenericEventTypeMetrics for " + + TestEnum.class.getName(), + "Metrics for " + dispatcher.getName())) + .setEnumClass(TestEnum.class) + .setEnums(TestEnum.class.getEnumConstants()) + .build().registerMetrics(); + + // We can the metrics enabled for TestEnum + dispatcher.addMetrics(genericEventTypeMetrics, + genericEventTypeMetrics.getEnumClass()); + dispatcher.init(conf); + + // Register handler + dispatcher.register(TestEnum.class, new TestHandler()); + dispatcher.start(); + + for (int i = 0; i < 3; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType); + dispatcher.getEventHandler().handle(event); + } + + for (int i = 0; i < 2; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType2); + dispatcher.getEventHandler().handle(event); + } + + // Check event type count. + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType) == 3, 1000, 10000); + + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType2) == 2, 1000, 10000); + + // Check time spend. + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType) + >= 1500*3); + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType) + < 1500*4); + + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType2) + >= 1500*2); + Assert.assertTrue(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType2) + < 1500*3); + + // Make sure metrics consistent. + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + get(TestEnum.TestEventType)), + genericEventTypeMetrics. + getRegistry().get("TestEventType_event_count").toString()); + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + get(TestEnum.TestEventType2)), + genericEventTypeMetrics. + getRegistry().get("TestEventType2_event_count").toString()); + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType)), + genericEventTypeMetrics. + getRegistry().get("TestEventType_processing_time").toString()); + Assert.assertEquals(Long.toString(genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType2)), + genericEventTypeMetrics. 
+ getRegistry().get("TestEventType2_processing_time").toString()); + + } finally { + dispatcher.close(); + } + + } + + @Test + public void testDispatcherMetricsHistogram() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + AsyncDispatcher dispatcher = null; + + try { + dispatcher = new AsyncDispatcher("RM Event dispatcher"); + + GenericEventTypeMetrics genericEventTypeMetrics = + new GenericEventTypeMetrics.EventTypeMetricsBuilder() + .setMs(DefaultMetricsSystem.instance()) + .setInfo(info("GenericEventTypeMetrics for " + + TestEnum.class.getName(), + "Metrics for " + dispatcher.getName())) + .setEnumClass(TestEnum.class) + .setEnums(TestEnum.class.getEnumConstants()) + .build().registerMetrics(); + + // We can the metrics enabled for TestEnum + dispatcher.addMetrics(genericEventTypeMetrics, + genericEventTypeMetrics.getEnumClass()); + dispatcher.init(conf); + + // Register handler + dispatcher.register(TestEnum.class, new TestHandler()); + dispatcher.start(); + + for (int i = 0; i < 3; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType); + dispatcher.getEventHandler().handle(event); + } + + for (int i = 0; i < 2; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType2); + dispatcher.getEventHandler().handle(event); + } + + // Check event type count. + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType) == 3, 1000, 10000); + + GenericTestUtils.waitFor(() -> genericEventTypeMetrics. + get(TestEnum.TestEventType2) == 2, 1000, 10000); + + // submit actual values + Map expectedValues = new HashMap<>(); + expectedValues.put("TestEventType_event_count", + genericEventTypeMetrics.get(TestEnum.TestEventType)); + expectedValues.put("TestEventType_processing_time", + genericEventTypeMetrics. + getTotalProcessingTime(TestEnum.TestEventType)); + expectedValues.put("TestEventType2_event_count", + genericEventTypeMetrics.get(TestEnum.TestEventType2)); + expectedValues.put("TestEventType2_processing_time", + genericEventTypeMetrics. 
+ getTotalProcessingTime(TestEnum.TestEventType2)); + Set testResults = new HashSet<>(); + MetricsCollectorImpl collector = new MetricsCollectorImpl(); + genericEventTypeMetrics.getMetrics(collector, true); + + for (MetricsRecord record : collector.getRecords()) { + for (AbstractMetric metric : record.metrics()) { + String metricName = metric.name(); + if (expectedValues.containsKey(metricName)) { + Long expectedValue = expectedValues.get(metricName); + Assert.assertEquals( + "Metric " + metricName + " doesn't have expected value", + expectedValue, metric.value()); + testResults.add(metricName); + } + } + } + Assert.assertEquals(expectedValues.keySet(), testResults); + + } finally { + dispatcher.close(); + } + + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java new file mode 100644 index 0000000000000..8fda9b7f38ac5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/GenericEventTypeMetricsManager.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager; + +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; + +import static org.apache.hadoop.metrics2.lib.Interns.info; + +public final class GenericEventTypeMetricsManager { + + private GenericEventTypeMetricsManager() { + // nothing to do + } + + // Construct a GenericEventTypeMetrics for dispatcher + public static > GenericEventTypeMetrics + create(String dispatcherName, Class eventTypeClass) { + return new GenericEventTypeMetrics.EventTypeMetricsBuilder() + .setMs(DefaultMetricsSystem.instance()) + .setInfo(info("GenericEventTypeMetrics for " + eventTypeClass.getName(), + "Metrics for " + dispatcherName)) + .setEnumClass(eventTypeClass) + .setEnums(eventTypeClass.getEnumConstants()) + .build().registerMetrics(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index c315b33541557..6d2a9fed08b91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -21,6 +21,7 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.spi.container.servlet.ServletContainer; +import org.apache.hadoop.yarn.metrics.GenericEventTypeMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; @@ -62,6 +63,7 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventDispatcher; import org.apache.hadoop.yarn.event.EventHandler; + import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; @@ -449,11 +451,23 @@ protected void setRMStateStore(RMStateStore rmStore) { } protected EventHandler createSchedulerEventDispatcher() { - return new EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); + EventDispatcher dispatcher = new + EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); + dispatcher. + setMetrics(GenericEventTypeMetricsManager. + create(dispatcher.getName(), SchedulerEventType.class)); + return dispatcher; } protected Dispatcher createDispatcher() { - return new AsyncDispatcher("RM Event dispatcher"); + AsyncDispatcher dispatcher = new AsyncDispatcher("RM Event dispatcher"); + GenericEventTypeMetrics genericEventTypeMetrics = + GenericEventTypeMetricsManager. 
+ create(dispatcher.getName(), NodesListManagerEventType.class); + // We can add more + dispatcher.addMetrics(genericEventTypeMetrics, + genericEventTypeMetrics.getEnumClass()); + return dispatcher; } protected ResourceScheduler createScheduler() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index 1cb5e1d0e7633..b9c5500a7d20b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.lib.StaticUserWebFilter; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.UserGroupInformation; @@ -73,6 +74,7 @@ public class TestResourceManager { public void setUp() throws Exception { YarnConfiguration conf = new YarnConfiguration(); UserGroupInformation.setConfiguration(conf); + DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager(); resourceManager.init(conf); resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java index a75be7745fb88..933eaf91fe4f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -52,6 +53,7 @@ public class TestSchedulerHealth { private ResourceManager resourceManager; public void setup() { + DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 
b6c08021b96b9..d8dc4dc61a9e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -212,6 +212,7 @@ public class TestCapacityScheduler extends CapacitySchedulerTestBase { @Before public void setUp() throws Exception { ResourceUtils.resetResourceTypes(new Configuration()); + DefaultMetricsSystem.setMiniClusterMode(true); resourceManager = new ResourceManager() { @Override protected RMNodeLabelsManager createNodeLabelManager() { From 6ce1e772d98254b4d59d458a920a6cdf07e4a8d5 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 9 Mar 2021 20:26:07 +0530 Subject: [PATCH 078/666] YARN-10671.Fix Typo in TestSchedulingRequestContainerAllocation. Contributed by D M Murali Krishna Reddy. --- .../capacity/TestSchedulingRequestContainerAllocation.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java index f963e61c91271..a4248c5dae584 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java @@ -862,7 +862,7 @@ public void testInterAppConstraintsWithNamespaces() throws Exception { try { rm.start(); - MockNM nm1 = rm.registerNode("192.168.0.1:1234:", 100*GB, 100); + MockNM nm1 = rm.registerNode("192.168.0.1:1234", 100*GB, 100); MockNM nm2 = rm.registerNode("192.168.0.2:1234", 100*GB, 100); MockNM nm3 = rm.registerNode("192.168.0.3:1234", 100*GB, 100); MockNM nm4 = rm.registerNode("192.168.0.4:1234", 100*GB, 100); From a286511ca9e615ccf73cafa537a5784689918f30 Mon Sep 17 00:00:00 2001 From: Chao Sun Date: Tue, 9 Mar 2021 12:01:29 -0800 Subject: [PATCH 079/666] HADOOP-16080. hadoop-aws does not work with hadoop-client-api. (#2522) Contributed by Chao Sun. 
(Cherry-picked via PR #2575) --- .../apache/hadoop/fs/cosn/CosNFileSystem.java | 6 +++--- .../BlockingThreadPoolExecutorService.java | 5 +---- .../util/SemaphoredDelegatingExecutor.java | 21 +++++++++---------- .../hadoop/fs/TestFileSystemCaching.java | 6 ++++-- .../fs/aliyun/oss/AliyunOSSFileSystem.java | 5 ++--- .../dev-support/findbugs-exclude.xml | 6 ++++++ .../apache/hadoop/fs/s3a/S3AFileSystem.java | 3 ++- .../hadoop/fs/s3a/impl/DeleteOperation.java | 4 +++- .../hadoop/fs/s3a/impl/StoreContext.java | 12 ++++++----- .../fs/s3a/impl/StoreContextBuilder.java | 7 +++---- .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 19 ++++++++++------- ...TestBlockingThreadPoolExecutorService.java | 4 ++-- .../s3a/impl/ITestPartialRenamesDeletes.java | 12 ++++++----- 13 files changed, 61 insertions(+), 49 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java index 94b10ad44012b..4dda1260731d3 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java @@ -28,11 +28,11 @@ import java.util.HashMap; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -71,8 +71,8 @@ public class CosNFileSystem extends FileSystem { private String owner = "Unknown"; private String group = "Unknown"; - private ListeningExecutorService boundedIOThreadPool; - private ListeningExecutorService boundedCopyThreadPool; + private ExecutorService boundedIOThreadPool; + private ExecutorService boundedCopyThreadPool; public CosNFileSystem() { } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java index 451b5f5d6ce1e..d08e84f99de29 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java @@ -28,8 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; - import org.apache.hadoop.classification.InterfaceAudience; /** @@ -105,8 +103,7 @@ public Thread newThread(Runnable r) { private BlockingThreadPoolExecutorService(int permitCount, ThreadPoolExecutor eventProcessingExecutor) { - super(MoreExecutors.listeningDecorator(eventProcessingExecutor), - permitCount, false); + super(eventProcessingExecutor, permitCount, false); this.eventProcessingExecutor = eventProcessingExecutor; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java index 10471c9365687..c4c11e57b3720 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java @@ -18,10 +18,8 @@ package org.apache.hadoop.util; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ForwardingListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ForwardingExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.statistics.DurationTracker; @@ -31,6 +29,7 @@ import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -55,10 +54,10 @@ @SuppressWarnings("NullableProblems") @InterfaceAudience.Private public class SemaphoredDelegatingExecutor extends - ForwardingListeningExecutorService { + ForwardingExecutorService { private final Semaphore queueingPermits; - private final ListeningExecutorService executorDelegatee; + private final ExecutorService executorDelegatee; private final int permitCount; private final DurationTrackerFactory trackerFactory; @@ -70,7 +69,7 @@ public class SemaphoredDelegatingExecutor extends * @param trackerFactory duration tracker factory. */ public SemaphoredDelegatingExecutor( - ListeningExecutorService executorDelegatee, + ExecutorService executorDelegatee, int permitCount, boolean fair, DurationTrackerFactory trackerFactory) { @@ -89,14 +88,14 @@ public SemaphoredDelegatingExecutor( * @param fair should the semaphore be "fair" */ public SemaphoredDelegatingExecutor( - ListeningExecutorService executorDelegatee, + ExecutorService executorDelegatee, int permitCount, boolean fair) { this(executorDelegatee, permitCount, fair, null); } @Override - protected ListeningExecutorService delegate() { + protected ExecutorService delegate() { return executorDelegatee; } @@ -127,7 +126,7 @@ public T invokeAny(Collection> tasks, long timeout, } @Override - public ListenableFuture submit(Callable task) { + public Future submit(Callable task) { try (DurationTracker ignored = trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) { queueingPermits.acquire(); @@ -139,7 +138,7 @@ public ListenableFuture submit(Callable task) { } @Override - public ListenableFuture submit(Runnable task, T result) { + public Future submit(Runnable task, T result) { try (DurationTracker ignored = trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) { queueingPermits.acquire(); @@ -151,7 +150,7 @@ public ListenableFuture submit(Runnable task, T result) { } @Override - public ListenableFuture submit(Runnable task) { + public Future submit(Runnable task) { try (DurationTracker ignored = trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) { queueingPermits.acquire(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index 01abeaaf577da..67a933bb9e39c 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -27,6 +27,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -423,9 +424,10 @@ private void createFileSystems(final FileSystem.Cache cache, final int count) // only one instance can be created at a time. URI uri = new URI("blocking://a"); ListeningExecutorService pool = - BlockingThreadPoolExecutorService.newInstance(count * 2, 0, + MoreExecutors.listeningDecorator( + BlockingThreadPoolExecutorService.newInstance(count * 2, 0, 10, TimeUnit.SECONDS, - "creation-threads"); + "creation-threads")); // submit a set of requests to create an FS instance. // the semaphore will block all but one, and that will block until diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java index 66fbd89b3236d..759484e4239fa 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java @@ -27,7 +27,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; @@ -78,8 +77,8 @@ public class AliyunOSSFileSystem extends FileSystem { private int maxKeys; private int maxReadAheadPartNumber; private int maxConcurrentCopyTasksPerDir; - private ListeningExecutorService boundedThreadPool; - private ListeningExecutorService boundedCopyThreadPool; + private ExecutorService boundedThreadPool; + private ExecutorService boundedCopyThreadPool; private static final PathFilter DEFAULT_FILTER = new PathFilter() { @Override diff --git a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml index 1ebf8587e8824..861eb83584e5e 100644 --- a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml +++ b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml @@ -84,4 +84,10 @@ + + + + + + diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 7506a5ed66944..4243a4f1a5d3e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -42,6 +42,7 @@ import java.util.Set; import java.util.Objects; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -262,7 +263,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private long partSize; private boolean 
enableMultiObjectsDelete; private TransferManager transfers; - private ListeningExecutorService boundedThreadPool; + private ExecutorService boundedThreadPool; private ThreadPoolExecutor unboundedThreadPool; private int executorCapacity; private long multiPartThreshold; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java index b47c7ad3aa005..2292179b3fd66 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java @@ -28,6 +28,7 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.DeleteObjectsResult; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -207,7 +208,8 @@ public DeleteOperation(final StoreContext context, "page size out of range: %s", pageSize); this.pageSize = pageSize; metadataStore = context.getMetadataStore(); - executor = context.createThrottledExecutor(1); + executor = MoreExecutors.listeningDecorator( + context.createThrottledExecutor(1)); } public long getFilesDeleted() { diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java index 28be486b43812..88231d8af9c04 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -23,9 +23,11 @@ import java.net.URI; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -127,7 +129,7 @@ public StoreContext( final Configuration configuration, final String username, final UserGroupInformation owner, - final ListeningExecutorService executor, + final ExecutorService executor, final int executorCapacity, final Invoker invoker, final S3AStatisticsContext instrumentation, @@ -144,7 +146,7 @@ public StoreContext( this.configuration = configuration; this.username = username; this.owner = owner; - this.executor = executor; + this.executor = MoreExecutors.listeningDecorator(executor); this.executorCapacity = executorCapacity; this.invoker = invoker; this.instrumentation = instrumentation; @@ -179,7 +181,7 @@ public String getUsername() { return username; } - public ListeningExecutorService getExecutor() { + public ExecutorService getExecutor() { return executor; } @@ -310,7 +312,7 @@ public void incrementGauge(Statistic statistic, long count) { * @param capacity maximum capacity of this executor. * @return an executor for submitting work. 
*/ - public ListeningExecutorService createThrottledExecutor(int capacity) { + public ExecutorService createThrottledExecutor(int capacity) { return new SemaphoredDelegatingExecutor(executor, capacity, true); } @@ -320,7 +322,7 @@ public ListeningExecutorService createThrottledExecutor(int capacity) { * {@link #executorCapacity}. * @return a new executor for exclusive use by the caller. */ - public ListeningExecutorService createThrottledExecutor() { + public ExecutorService createThrottledExecutor() { return createThrottledExecutor(executorCapacity); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java index e1f2cb15b821e..13953f9c985f3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java @@ -19,8 +19,7 @@ package org.apache.hadoop.fs.s3a.impl; import java.net.URI; - -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.Invoker; @@ -46,7 +45,7 @@ public class StoreContextBuilder { private UserGroupInformation owner; - private ListeningExecutorService executor; + private ExecutorService executor; private int executorCapacity; @@ -96,7 +95,7 @@ public StoreContextBuilder setOwner(final UserGroupInformation ugi) { } public StoreContextBuilder setExecutor( - final ListeningExecutorService ex) { + final ExecutorService ex) { this.executor = ex; return this; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index eaf9ee22f916b..b963e7e2532e5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -67,6 +67,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -454,7 +455,8 @@ void bindToOwnerFilesystem(final S3AFileSystem fs) { instrumentation = context.getInstrumentation() .getS3GuardInstrumentation(); username = context.getUsername(); - executor = context.createThrottledExecutor(); + executor = MoreExecutors.listeningDecorator( + context.createThrottledExecutor()); ttlTimeProvider = Preconditions.checkNotNull( context.getTimeProvider(), "ttlTimeProvider must not be null"); @@ -509,13 +511,14 @@ public void initialize(Configuration config, // the executor capacity for work. 
int executorCapacity = intOption(conf, EXECUTOR_CAPACITY, DEFAULT_EXECUTOR_CAPACITY, 1); - executor = BlockingThreadPoolExecutorService.newInstance( - executorCapacity, - executorCapacity * 2, - longOption(conf, KEEPALIVE_TIME, - DEFAULT_KEEPALIVE_TIME, 0), - TimeUnit.SECONDS, - "s3a-ddb-" + tableName); + executor = MoreExecutors.listeningDecorator( + BlockingThreadPoolExecutorService.newInstance( + executorCapacity, + executorCapacity * 2, + longOption(conf, KEEPALIVE_TIME, + DEFAULT_KEEPALIVE_TIME, 0), + TimeUnit.SECONDS, + "s3a-ddb-" + tableName)); initDataAccessRetries(conf); this.ttlTimeProvider = ttlTp; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java index ce20cc3aa2dcc..55423273b9579 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java @@ -18,7 +18,6 @@ package org.apache.hadoop.fs.s3a; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; import org.apache.hadoop.util.StopWatch; @@ -33,6 +32,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertEquals; @@ -70,7 +70,7 @@ public static void afterClass() throws Exception { @Test public void testSubmitCallable() throws Exception { ensureCreated(); - ListenableFuture f = tpe.submit(callableSleeper); + Future f = tpe.submit(callableSleeper); Integer v = f.get(); assertEquals(SOME_VALUE, v); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java index e20e936454b4f..c920be13230dd 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java @@ -34,6 +34,7 @@ import com.amazonaws.services.s3.model.MultiObjectDeleteException; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.assertj.core.api.Assertions; import org.junit.Test; import org.junit.runner.RunWith; @@ -128,11 +129,12 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { * For submitting work. */ private static final ListeningExecutorService EXECUTOR = - BlockingThreadPoolExecutorService.newInstance( - EXECUTOR_THREAD_COUNT, - EXECUTOR_THREAD_COUNT * 2, - 30, TimeUnit.SECONDS, - "test-operations"); + MoreExecutors.listeningDecorator( + BlockingThreadPoolExecutorService.newInstance( + EXECUTOR_THREAD_COUNT, + EXECUTOR_THREAD_COUNT * 2, + 30, TimeUnit.SECONDS, + "test-operations")); /** From a467540c75ee28dcb7c2895fab5341d5a4e9b413 Mon Sep 17 00:00:00 2001 From: Hui Fei Date: Wed, 10 Mar 2021 14:11:29 +0800 Subject: [PATCH 080/666] HDFS-15875. 
Check whether file is being truncated before truncate (#2746) --- .../server/datanode/BlockRecoveryWorker.java | 1 + .../datanode/DataNodeFaultInjector.java | 5 ++ .../hdfs/server/namenode/FSDirTruncateOp.java | 5 ++ .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../server/namenode/TestFileTruncate.java | 69 ++++++++++++++++++- 5 files changed, 80 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java index dfc6f7f13364e..d4687e8331adf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java @@ -119,6 +119,7 @@ protected void recover() throws IOException { List syncList = new ArrayList<>(locs.length); int errorCount = 0; int candidateReplicaCnt = 0; + DataNodeFaultInjector.get().delay(); // Check generation stamps, replica size and state. Replica must satisfy // the following criteria to be included in syncList for recovery: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java index 949c2dd676689..58c589e4d2554 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java @@ -137,4 +137,9 @@ public void delayWhenOfferServiceHoldLock() {} * Used as a hook to inject intercept when re-register. */ public void blockUtilSendFullBlockReport() {} + + /** + * Just delay a while. 
+ */ + public void delay() {} } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java index 4104930ca3659..22b0e175018d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; @@ -111,6 +112,10 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg, + truncatedBlock.getNumBytes(); if (newLength == truncateLength) { return new TruncateResult(false, fsd.getAuditFileInfo(iip)); + } else { + throw new AlreadyBeingCreatedException( + RecoverLeaseOp.TRUNCATE_FILE.getExceptionMessage(src, + clientName, clientMachine, src + " is being truncated.")); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ff03d7b0522fc..c6902b8ea213c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2823,7 +2823,7 @@ enum RecoverLeaseOp { TRUNCATE_FILE, RECOVER_LEASE; - private String getExceptionMessage(String src, String holder, + public String getExceptionMessage(String src, String holder, String clientMachine, String reason) { return "Failed to " + this + " " + src + " for " + holder + " on " + clientMachine + " because " + reason; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index 60f49c442c4ea..57f5ea33eb0ee 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -33,6 +33,9 @@ import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.test.LambdaTestUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -218,6 +221,70 @@ public void testSnapshotTruncateThenDeleteSnapshot() throws IOException { fs.delete(dir, true); } + /** + * Test truncate twice together on a file. 
+ */ + @Test(timeout=90000) + public void testTruncateTwiceTogether() throws Exception { + + Path dir = new Path("/testTruncateTwiceTogether"); + fs.mkdirs(dir); + final Path p = new Path(dir, "file"); + final byte[] data = new byte[100 * BLOCK_SIZE]; + ThreadLocalRandom.current().nextBytes(data); + writeContents(data, data.length, p); + + DataNodeFaultInjector originInjector = DataNodeFaultInjector.get(); + DataNodeFaultInjector injector = new DataNodeFaultInjector() { + @Override + public void delay() { + try { + // Bigger than soft lease period. + Thread.sleep(5000); + } catch (InterruptedException e) { + // Ignore + } + } + }; + // Delay to recovery. + DataNodeFaultInjector.set(injector); + + // Truncate by using different client name. + Thread t = new Thread(() -> { + String hdfsCacheDisableKey = "fs.hdfs.impl.disable.cache"; + boolean originCacheDisable = + conf.getBoolean(hdfsCacheDisableKey, false); + try { + conf.setBoolean(hdfsCacheDisableKey, true); + FileSystem fs1 = FileSystem.get(conf); + fs1.truncate(p, data.length-1); + } catch (IOException e) { + // ignore + } finally{ + conf.setBoolean(hdfsCacheDisableKey, originCacheDisable); + } + }); + t.start(); + t.join(); + NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) + .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT); + + LambdaTestUtils.intercept(RemoteException.class, + "/testTruncateTwiceTogether/file is being truncated", + () -> fs.truncate(p, data.length - 2)); + + // wait for block recovery + checkBlockRecovery(p); + assertFileLength(p, data.length - 1); + + DataNodeFaultInjector.set(originInjector); + NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) + .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, + conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY, + DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000); + fs.delete(dir, true); + } + /** * Truncate files and then run other operations such as * rename, set replication, set permission, etc. @@ -631,7 +698,7 @@ public void testTruncateFailure() throws IOException { { try { fs.truncate(p, 0); - fail("Truncate must fail since a trancate is already in pregress."); + fail("Truncate must fail since a truncate is already in progress."); } catch (IOException expected) { GenericTestUtils.assertExceptionContains( "Failed to TRUNCATE_FILE", expected); From 1cb531cc762bd71b83dc062190053f205037f0c2 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Wed, 10 Mar 2021 22:23:54 +0900 Subject: [PATCH 081/666] HADOOP-17573. Fix compilation error of OBSFileSystem in trunk. 
(#2758) --- .../src/main/java/org/apache/hadoop/fs/obs/OBSFileSystem.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSFileSystem.java b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSFileSystem.java index dd8be1e1fe9dc..aa38c93f80c3b 100644 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSFileSystem.java +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSFileSystem.java @@ -19,7 +19,6 @@ package org.apache.hadoop.fs.obs; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import com.obs.services.ObsClient; import com.obs.services.exception.ObsException; import com.obs.services.model.AccessControlList; @@ -53,6 +52,7 @@ import java.io.IOException; import java.net.URI; import java.util.EnumSet; +import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -169,7 +169,7 @@ public final class OBSFileSystem extends FileSystem { /** * Bounded thread pool for multipart upload. */ - private ListeningExecutorService boundedMultipartUploadThreadPool; + private ExecutorService boundedMultipartUploadThreadPool; /** * Bounded thread pool for copy. From a11ea9e43a78348dbcc8b98a8498a5a2eb3e83aa Mon Sep 17 00:00:00 2001 From: Pierrick Hymbert Date: Wed, 10 Mar 2021 16:39:55 +0100 Subject: [PATCH 082/666] [HADOOP-17567] typo in MagicCommitTracker (#2749) Contributed by Pierrick Hymbert --- .../apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java index ddaee19f9f74a..c3a70bffac057 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java @@ -140,7 +140,7 @@ public boolean aboutToComplete(String uploadId, new IOStatisticsSnapshot(iostatistics)); byte[] bytes = commitData.toBytes(); LOG.info("Uncommitted data pending to file {};" - + " commit metadata for {} parts in {}. sixe: {} byte(s)", + + " commit metadata for {} parts in {}. size: {} byte(s)", path.toUri(), parts.size(), pendingPartKey, bytesWritten); LOG.debug("Closed MPU to {}, saved commit information to {}; data=:\n{}", path, pendingPartKey, commitData); From 9507384939ec88b05dd6401fa80a5dec57518174 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 11 Mar 2021 10:56:07 +0900 Subject: [PATCH 083/666] HADOOP-16870. Use spotbugs-maven-plugin instead of findbugs-maven-plugin (#2753) Removed findbugs from the hadoop build images and added spotbugs instead. Upgraded SpotBugs to 4.2.2 and spotbugs-maven-plugin to 4.2.0. 
Reviewed-by: Masatake Iwasaki --- BUILDING.txt | 2 +- dev-support/Jenkinsfile | 6 +++--- dev-support/bin/hadoop.sh | 4 ++-- dev-support/docker/Dockerfile | 12 ++++++++++-- dev-support/docker/Dockerfile_aarch64 | 12 ++++++++++-- .../hadoop-cos/pom.xml | 5 ++--- hadoop-common-project/hadoop-auth/pom.xml | 4 ++-- hadoop-common-project/hadoop-kms/pom.xml | 4 ++-- hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++-- hadoop-common-project/hadoop-nfs/pom.xml | 4 ++-- hadoop-common-project/hadoop-registry/pom.xml | 5 ++--- hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 4 ++-- .../hadoop-mapreduce-client/pom.xml | 5 ++--- .../hadoop-mapreduce-examples/pom.xml | 5 ++--- hadoop-mapreduce-project/pom.xml | 12 ++++-------- hadoop-project-dist/pom.xml | 5 ++--- hadoop-project/pom.xml | 17 ----------------- hadoop-tools/hadoop-aliyun/pom.xml | 5 ++--- hadoop-tools/hadoop-archive-logs/pom.xml | 5 ++--- hadoop-tools/hadoop-aws/pom.xml | 5 ++--- hadoop-tools/hadoop-azure/pom.xml | 5 ++--- hadoop-tools/hadoop-datajoin/pom.xml | 5 ++--- hadoop-tools/hadoop-fs2img/pom.xml | 5 ++--- hadoop-tools/hadoop-gridmix/pom.xml | 5 ++--- hadoop-tools/hadoop-kafka/pom.xml | 5 ++--- hadoop-tools/hadoop-openstack/pom.xml | 5 ++--- hadoop-tools/hadoop-rumen/pom.xml | 5 ++--- hadoop-tools/hadoop-sls/pom.xml | 5 ++--- hadoop-tools/hadoop-streaming/pom.xml | 5 ++--- .../pom.xml | 4 ++-- hadoop-yarn-project/hadoop-yarn/pom.xml | 5 ++--- hadoop-yarn-project/pom.xml | 7 ++----- pom.xml | 18 ++++++++++++++++++ 33 files changed, 98 insertions(+), 106 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 821056f81de7f..9bbb6dbf891a9 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -139,7 +139,7 @@ Maven build goals: * Compile : mvn compile [-Pnative] * Run tests : mvn test [-Pnative] [-Pshelltest] * Create JAR : mvn package - * Run findbugs : mvn compile findbugs:findbugs + * Run spotbugs : mvn compile spotbugs:spotbugs * Run checkstyle : mvn compile checkstyle:checkstyle * Install JAR in M2 cache : mvn install * Deploy JAR to Maven repo : mvn deploy diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 6841ed30a79cc..60d2f7f645b31 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -120,11 +120,11 @@ pipeline { YETUS_ARGS+=("--proclimit=5500") YETUS_ARGS+=("--dockermemlimit=22g") - # -1 findbugs issues that show up prior to the patch being applied - YETUS_ARGS+=("--findbugs-strict-precheck") + # -1 spotbugs issues that show up prior to the patch being applied + YETUS_ARGS+=("--spotbugs-strict-precheck") # rsync these files back into the archive dir - YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,findbugsXml.xml") + YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,spotbugsXml.xml") # URL for user-side presentation in reports and such to our artifacts # (needs to match the archive bits below) diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh index 3343014aae8bb..beebea8c97f6b 100755 --- a/dev-support/bin/hadoop.sh +++ b/dev-support/bin/hadoop.sh @@ -482,7 +482,7 @@ function personality_file_tests fi if [[ ${filename} =~ \.java$ ]]; then - add_test findbugs + add_test spotbugs fi } @@ -550,7 +550,7 @@ function shadedclient_rebuild echo_and_redirect "${logfile}" \ "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \ "${modules[@]}" \ - -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dfindbugs.skip=true + -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true count=$("${GREP}" -c '\[ERROR\]' "${logfile}") if [[ 
${count} -gt 0 ]]; then diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 930d44a839c4f..11f3c6f739b86 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -44,7 +44,6 @@ RUN apt-get -q update \ cmake \ curl \ doxygen \ - findbugs \ fuse \ g++ \ gcc \ @@ -92,7 +91,16 @@ RUN apt-get -q update \ ENV MAVEN_HOME /usr # JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 -ENV FINDBUGS_HOME /usr + +####### +# Install SpotBugs 4.2.2 +####### +RUN mkdir -p /opt/spotbugs \ + && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz \ + && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ + && chmod +x /opt/spotbugs/bin/* +ENV SPOTBUGS_HOME /opt/spotbugs ####### # Install Boost 1.72 (1.71 ships with Focal) diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 858c9b36ddf05..362c5466a4a14 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -48,7 +48,6 @@ RUN apt-get -q update \ cmake \ curl \ doxygen \ - findbugs \ fuse \ g++ \ gcc \ @@ -96,7 +95,16 @@ RUN apt-get -q update \ ENV MAVEN_HOME /usr # JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 -ENV FINDBUGS_HOME /usr + +####### +# Install SpotBugs 4.2.2 +####### +RUN mkdir -p /opt/spotbugs \ + && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz \ + && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ + && chmod +x /opt/spotbugs/bin/* +ENV SPOTBUGS_HOME /opt/spotbugs ####### # Install Boost 1.72 (1.71 ships with Focal) diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml index d18b09f450408..fa47e354c7998 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml @@ -64,10 +64,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 10e0b9c825677..8e8526c7450e0 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -237,8 +237,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 338af127f8bd7..9de8b9caf6e68 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -237,8 +237,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index c76abf750b78d..c292aebbe3656 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -53,8 +53,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index b3c727910e7c2..f5de8407a0920 100644 --- 
a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -107,8 +107,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index 5e3e2edcb9690..0058832e6110b 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -163,10 +163,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${project.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index de7112270883a..1916ef0e3b7f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -337,8 +337,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 0d76285cded61..55940ae4ff744 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -157,10 +157,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${mr.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index ec3a0514b26fc..48cf27efe437f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -138,10 +138,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${mr.examples.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 5ec1db63240ec..09b4d42606118 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -178,10 +178,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${mr.basedir}/dev-support/findbugs-exclude.xml Max @@ -299,12 +298,9 @@ - org.codehaus.mojo - findbugs-maven-plugin - - 2.3.2 + com.github.spotbugs + spotbugs-maven-plugin - true true diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 410ec8635eecb..10a0526519de2 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -89,11 +89,10 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml - true 2048 diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 7c74615dbce4c..9e728e1513ebc 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -101,7 +101,6 @@ 3.5.6 4.2.0 3.0.5 - 4.0.6 2.1.7 27.0-jre @@ -1854,18 +1853,6 @@ maven-war-plugin ${maven-war-plugin.version} - - org.codehaus.mojo - findbugs-maven-plugin - ${findbugs.version} - - - com.github.spotbugs - spotbugs - ${spotbugs.version} - - - org.codehaus.mojo make-maven-plugin @@ -2099,10 +2086,6 @@ - - org.codehaus.mojo - findbugs-maven-plugin - org.apache.maven.plugins maven-antrun-plugin diff --git a/hadoop-tools/hadoop-aliyun/pom.xml 
b/hadoop-tools/hadoop-aliyun/pom.xml index 284c6517821a8..8a68d9b4f7db6 100644 --- a/hadoop-tools/hadoop-aliyun/pom.xml +++ b/hadoop-tools/hadoop-aliyun/pom.xml @@ -58,10 +58,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml index 10bc9a0833209..3caa26c52f884 100644 --- a/hadoop-tools/hadoop-archive-logs/pom.xml +++ b/hadoop-tools/hadoop-archive-logs/pom.xml @@ -194,10 +194,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index f6d70230944fc..0cab5ada2169d 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -399,10 +399,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 4176305c32c53..d5d0f1fcd7303 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -50,10 +50,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml index e26fa5dc33352..ec77281b66350 100644 --- a/hadoop-tools/hadoop-datajoin/pom.xml +++ b/hadoop-tools/hadoop-datajoin/pom.xml @@ -108,10 +108,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml index 6f268ca616f91..9a273621cfa9d 100644 --- a/hadoop-tools/hadoop-fs2img/pom.xml +++ b/hadoop-tools/hadoop-fs2img/pom.xml @@ -87,10 +87,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml index afa9cf5d3673e..f383ecad64150 100644 --- a/hadoop-tools/hadoop-gridmix/pom.xml +++ b/hadoop-tools/hadoop-gridmix/pom.xml @@ -123,10 +123,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml index 8789b13c51a49..a6f854a1ec272 100644 --- a/hadoop-tools/hadoop-kafka/pom.xml +++ b/hadoop-tools/hadoop-kafka/pom.xml @@ -39,10 +39,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true Max diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml index a4624c54e6871..e1065883feb9e 100644 --- a/hadoop-tools/hadoop-openstack/pom.xml +++ b/hadoop-tools/hadoop-openstack/pom.xml @@ -66,10 +66,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml index 6afc51c1f21c8..fd3b5c9a1b8fa 100644 --- a/hadoop-tools/hadoop-rumen/pom.xml +++ b/hadoop-tools/hadoop-rumen/pom.xml @@ -102,10 +102,9 @@ - 
org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml index ce9197cd21dcc..5bb5a415d90f6 100644 --- a/hadoop-tools/hadoop-sls/pom.xml +++ b/hadoop-tools/hadoop-sls/pom.xml @@ -108,10 +108,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml index a419e10d753ff..f03eb001450b4 100644 --- a/hadoop-tools/hadoop-streaming/pom.xml +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -129,10 +129,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index 072e70a894d35..12b0112680320 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -389,8 +389,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin true diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index c8d9ddd9d007d..dff9a2b08a9f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -39,10 +39,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${yarn.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index d05d1df2c548f..ae40fef3f716b 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -204,12 +204,9 @@ - org.codehaus.mojo - findbugs-maven-plugin - - 2.3.2 + com.github.spotbugs + spotbugs-maven-plugin - true true diff --git a/pom.xml b/pom.xml index 7e94cfb8ad0b0..32090f86205d3 100644 --- a/pom.xml +++ b/pom.xml @@ -113,6 +113,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 3.1.0 8.29 1.4.3 + 4.2.2 + 4.2.0 bash @@ -342,6 +344,18 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x dependency-check-maven ${dependency-check-maven.version} + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + + com.github.spotbugs + spotbugs + ${spotbugs.version} + + + @@ -443,6 +457,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x dependency-check-maven ${dependency-check-maven.version} + + com.github.spotbugs + spotbugs-maven-plugin + From 53dc3f3cb8b54aadc4c6c35e4e0a5570f04392f9 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 11 Mar 2021 12:47:39 +0000 Subject: [PATCH 084/666] HADOOP-16721. Improve S3A rename resilience (#2742) The S3A connector's rename() operation now raises FileNotFoundException if the source doesn't exist; a FileAlreadyExistsException if the destination exists and is unsuitable for the source file/directory. 
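To illustrate the new rename semantics, here is a minimal caller-side sketch written against the public FileSystem API; it is separate from the patch itself. The class name, bucket name and paths are invented placeholders, and the snippet assumes the hadoop-aws module and valid credentials are available at run time.

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileAlreadyExistsException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /**
     * Sketch of handling the exception-based failure reporting of
     * S3A rename() described above.
     */
    public final class RenameExample {

      private RenameExample() {
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder bucket and paths; replace with real ones.
        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
        Path src = new Path("/data/incoming/part-0000");
        Path dst = new Path("/data/processed/part-0000");
        try {
          // A missing source, or an existing destination that cannot accept
          // the source, is now reported by exception rather than by a
          // false return value.
          if (!fs.rename(src, dst)) {
            // Some failure modes (for example renaming onto a non-empty
            // directory) are still reported through the boolean result.
            System.err.println("rename returned false: " + src + " to " + dst);
          }
        } catch (FileNotFoundException e) {
          // Source path does not exist.
          System.err.println("source missing: " + e);
        } catch (FileAlreadyExistsException e) {
          // Destination exists and is unsuitable for the source.
          System.err.println("destination occupied: " + e);
        }
      }
    }

Callers that previously relied on rename() returning false when the source was missing now need to handle FileNotFoundException instead.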
When renaming to a path which does not exist, the connector no longer checks for the destination parent directory existing -instead it simply verifies that there is no file immediately above the destination path. This is needed to avoid race conditions with delete() and rename() calls working on adjacent subdirectories. Contributed by Steve Loughran. --- .../site/markdown/filesystem/filesystem.md | 12 +- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 37 +-- .../tools/hadoop-aws/troubleshooting_s3a.md | 34 +++ .../contract/s3a/ITestS3AContractRename.java | 10 +- .../fs/s3a/ITestS3AFileSystemContract.java | 50 +++- .../fs/s3a/ITestS3GuardListConsistency.java | 2 +- .../hadoop/fs/s3a/MockS3AFileSystem.java | 2 +- .../apache/hadoop/fs/s3a/S3ATestUtils.java | 10 + .../fs/s3a/impl/ITestRenameDeleteRace.java | 248 ++++++++++++++++++ .../fs/s3a/performance/OperationCost.java | 2 +- .../src/test/resources/contract/s3a.xml | 14 +- 11 files changed, 385 insertions(+), 36 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md index 433212491b52f..a5a35df30c0b5 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md @@ -1164,7 +1164,7 @@ deletion, preventing the stores' use as drop-in replacements for HDFS. ### `boolean rename(Path src, Path d)` -In terms of its specification, `rename()` is one of the most complex operations within a filesystem . +In terms of its specification, `rename()` is one of the most complex operations within a filesystem. In terms of its implementation, it is the one with the most ambiguity regarding when to return false versus raising an exception. @@ -1187,7 +1187,6 @@ Source `src` must exist: exists(FS, src) else raise FileNotFoundException - `dest` cannot be a descendant of `src`: if isDescendant(FS, src, dest) : raise IOException @@ -1283,6 +1282,15 @@ that the parent directories of the destination also exist. exists(FS', parent(dest)) +*S3A FileSystem* + +The outcome is as a normal rename, with the additional (implicit) feature that +the parent directories of the destination then exist: +`exists(FS', parent(dest))` + +There is a check for and rejection if the `parent(dest)` is a file, but +no checks for any other ancestors. + *Other Filesystems (including Swift) * Other filesystems strictly reject the operation, raising a `FileNotFoundException` diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 4243a4f1a5d3e..f625346957f4a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -1463,9 +1463,6 @@ public boolean rename(Path src, Path dst) throws IOException { LOG.info("{}", e.getMessage()); LOG.debug("rename failure", e); return e.getExitCode(); - } catch (FileNotFoundException e) { - LOG.debug(e.toString()); - return false; } } @@ -1518,9 +1515,9 @@ private Pair initiateRename( // whether or not it can be the destination of the rename. 
if (srcStatus.isDirectory()) { if (dstStatus.isFile()) { - throw new RenameFailedException(src, dst, - "source is a directory and dest is a file") - .withExitCode(srcStatus.isFile()); + throw new FileAlreadyExistsException( + "Failed to rename " + src + " to " + dst + +"; source is a directory and dest is a file"); } else if (dstStatus.isEmptyDirectory() != Tristate.TRUE) { throw new RenameFailedException(src, dst, "Destination is a non-empty directory") @@ -1531,9 +1528,9 @@ private Pair initiateRename( // source is a file. The destination must be a directory, // empty or not if (dstStatus.isFile()) { - throw new RenameFailedException(src, dst, - "Cannot rename onto an existing file") - .withExitCode(false); + throw new FileAlreadyExistsException( + "Failed to rename " + src + " to " + dst + + "; destination file exists"); } } @@ -1544,17 +1541,24 @@ private Pair initiateRename( if (!pathToKey(parent).isEmpty() && !parent.equals(src.getParent())) { try { - // only look against S3 for directories; saves - // a HEAD request on all normal operations. + // make sure parent isn't a file. + // don't look for parent being a dir as there is a risk + // of a race between dest dir cleanup and rename in different + // threads. S3AFileStatus dstParentStatus = innerGetFileStatus(parent, - false, StatusProbeEnum.DIRECTORIES); + false, StatusProbeEnum.FILE); + // if this doesn't raise an exception then it's one of + // raw S3: parent is a file: error + // guarded S3: parent is a file or a dir. if (!dstParentStatus.isDirectory()) { throw new RenameFailedException(src, dst, "destination parent is not a directory"); } - } catch (FileNotFoundException e2) { - throw new RenameFailedException(src, dst, - "destination has no parent "); + } catch (FileNotFoundException expected) { + // nothing was found. Don't worry about it; + // expect rename to implicitly create the parent dir (raw S3) + // or the s3guard parents (guarded) + } } } @@ -2761,7 +2765,8 @@ private void createFakeDirectoryIfNecessary(Path f) * @throws IOException IO problem */ @Retries.RetryTranslated - void maybeCreateFakeParentDirectory(Path path) + @VisibleForTesting + protected void maybeCreateFakeParentDirectory(Path path) throws IOException, AmazonClientException { Path parent = path.getParent(); if (parent != null && !parent.isRoot()) { diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 6cdbe3e198769..416793b8ed957 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -1126,6 +1126,40 @@ We also recommend using applications/application options which do not rename files when committing work or when copying data to S3, but instead write directly to the final destination. +## Rename not behaving as "expected" + +S3 is not a filesystem. The S3A connector mimics file and directory rename by + +* HEAD then LIST of source path. The source MUST exist, else a `FileNotFoundException` + is raised. +* HEAD then LIST of the destination path. + This SHOULD NOT exist. + If it does and if the source is a directory, the destination MUST be an empty directory. + If the source is a file, the destination MAY be a directory, empty or not. + If the destination exists and relevant conditions are not met, a `FileAlreadyExistsException` + is raised. 
+* If the destination path does not exist, a HEAD request of the parent path + to verify that there is no object there. + Directory markers are not checked for, nor that the path has any children, +* File-by-file copy of source objects to destination. + Parallelized, with page listings of directory objects and issuing of DELETE requests. +* Post-delete recreation of source parent directory marker, if needed. + +This is slow (`O(data)`) and can cause timeouts on code which is required +to send regular progress reports/heartbeats -for example, distCp. +It is _very unsafe_ if the calling code expects atomic renaming as part +of any commit algorithm. +This is why the [S3A Committers](committers.md) or similar are needed to safely +commit output. + +There is also the risk of race conditions arising if many processes/threads +are working with the same directory tree +[HADOOP-16721](https://issues.apache.org/jira/browse/HADOOP-16721). + +To reduce this risk, since Hadoop 3.3.1, the S3A connector no longer verifies the parent directory +of the destination of a rename is a directory -only that it is _not_ a file. +You can rename a directory or file deep under a file if you try -after which +there is no guarantee of the files being found in listings. Try not to do that. ## S3 Server Side Encryption diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java index e623d5d27941b..e44df5facd2c3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.s3a.Statistic; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; @@ -106,9 +107,7 @@ public void setup() throws Exception { @Override public void testRenameDirIntoExistingDir() throws Throwable { - describe("Verify renaming a dir into an existing dir puts the files" - +" from the source dir into the existing dir" - +" and leaves existing files alone"); + describe("S3A rename into an existing directory returns false"); FileSystem fs = getFileSystem(); String sourceSubdir = "source"; Path srcDir = path(sourceSubdir); @@ -169,4 +168,9 @@ public void testRenamePopulatesFileAncestors2() throws Exception { validateAncestorsMoved(src, dest, nestedFile); } + + @Override + public void testRenameFileUnderFileSubdir() throws Exception { + skip("Rename deep paths under files is allowed"); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java index 46d6ffc85e03b..7ce7b8385cec4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.s3a; +import java.io.FileNotFoundException; + import org.junit.Before; import org.junit.Rule; import 
org.junit.Test; @@ -25,21 +27,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; +import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assume.*; import static org.junit.Assert.*; /** * Tests a live S3 system. If your keys and bucket aren't specified, all tests * are marked as passed. - * - * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest from - * TestCase which uses the old Junit3 runner that doesn't ignore assumptions - * properly making it impossible to skip the tests if we don't have a valid - * bucket. - **/ + */ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest { protected static final Logger LOG = @@ -77,7 +77,7 @@ public Path getTestBaseDir() { @Test public void testMkdirsWithUmask() throws Exception { - // not supported + skip("Not supported"); } @Test @@ -103,8 +103,38 @@ public void testRenameDirectoryAsExistingDirectory() throws Exception { } @Test - public void testMoveDirUnderParent() throws Throwable { - // not support because - // Fails if dst is a directory that is not empty. + public void testRenameDirectoryAsExistingFile() throws Exception { + assumeTrue(renameSupported()); + + Path src = path("testRenameDirectoryAsExistingFile/dir"); + fs.mkdirs(src); + Path dst = path("testRenameDirectoryAsExistingFileNew/newfile"); + createFile(dst); + intercept(FileAlreadyExistsException.class, + () -> rename(src, dst, false, true, true)); + } + + @Test + public void testRenameDirectoryMoveToNonExistentDirectory() + throws Exception { + skip("does not fail"); + } + + @Test + public void testRenameFileMoveToNonExistentDirectory() throws Exception { + skip("does not fail"); + } + + @Test + public void testRenameFileAsExistingFile() throws Exception { + intercept(FileAlreadyExistsException.class, + () -> super.testRenameFileAsExistingFile()); + } + + @Test + public void testRenameNonExistentPath() throws Exception { + intercept(FileNotFoundException.class, + () -> super.testRenameNonExistentPath()); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java index 75653b1f6aff4..09f66df4c2ec0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java @@ -196,7 +196,7 @@ public void testRollingRenames() throws Exception { } S3AFileSystem fs = getFileSystem(); - assertFalse("Renaming deleted file should have failed", + intercept(FileNotFoundException.class, () -> fs.rename(dir2[0], dir1[0])); assertTrue("Renaming over existing file should have succeeded", fs.rename(dir1[0], dir0[0])); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java index 1570e10dd05c4..e2915884cefa3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java @@ -332,7 +332,7 @@ void deleteObjectAtPath(Path f, } 
@Override - void maybeCreateFakeParentDirectory(Path path) + protected void maybeCreateFakeParentDirectory(Path path) throws IOException, AmazonClientException { // no-op } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index bba5041e4a610..599f18cd9ad62 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -827,6 +827,16 @@ public static void removeBaseAndBucketOverrides( removeBaseAndBucketOverrides(getTestBucketName(conf), conf, options); } + /** + * Disable S3Guard from the test bucket in a configuration. + * @param conf configuration. + */ + public static void disableS3GuardInTestBucket(Configuration conf) { + removeBaseAndBucketOverrides(getTestBucketName(conf), conf, + S3_METADATA_STORE_IMPL, + DIRECTORY_MARKER_POLICY); + conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); + } /** * Call a function; any exception raised is logged at info. * This is for test teardowns. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java new file mode 100644 index 0000000000000..9885eb5698477 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +import com.amazonaws.AmazonClientException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; + +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableS3GuardInTestBucket; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; +import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; + +/** + * HADOOP-16721: race condition with delete and rename underneath the + * same destination directory. + * This test suite recreates the failure using semaphores to + * guarantee the failure condition is encountered + * -then verifies that the rename operation is successful. + */ +public class ITestRenameDeleteRace extends AbstractS3ATestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestRenameDeleteRace.class); + + + /** Many threads for scale performance: {@value}. */ + public static final int EXECUTOR_THREAD_COUNT = 2; + + /** + * For submitting work. + */ + private static final BlockingThreadPoolExecutorService EXECUTOR = + BlockingThreadPoolExecutorService.newInstance( + EXECUTOR_THREAD_COUNT, + EXECUTOR_THREAD_COUNT * 2, + 30, TimeUnit.SECONDS, + "test-operations"); + + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + + // use the keep policy to ensure that surplus markers exist + // to complicate failures + conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE); + removeBaseAndBucketOverrides(getTestBucketName(conf), + conf, + DIRECTORY_MARKER_POLICY); + disableS3GuardInTestBucket(conf); + return conf; + } + + /** + * This test uses a subclass of S3AFileSystem to recreate the race between + * subdirectory delete and rename. + * The JUnit thread performs the rename, while an executor-submitted + * thread performs the delete. + * Semaphores are used to + * -block the JUnit thread from initiating the rename until the delete + * has finished the delete phase, and has reached the + * {@code maybeCreateFakeParentDirectory()} call. + * A second semaphore is used to block the delete thread from + * listing and recreating the deleted directory until after + * the JUnit thread has completed. + * Together, the two semaphores guarantee that the rename() + * call will be made at exactly the moment when the destination + * directory no longer exists. + */ + @Test + public void testDeleteRenameRaceCondition() throws Throwable { + describe("verify no race between delete and rename"); + + // the normal FS is used for path setup, verification + // and the rename call. 
+ final S3AFileSystem fs = getFileSystem(); + final Path path = path(getMethodName()); + Path srcDir = new Path(path, "src"); + + // this dir must exist throughout the rename + Path destDir = new Path(path, "dest"); + // this dir tree will be deleted in a thread which does not + // complete before the rename exists + Path destSubdir1 = new Path(destDir, "subdir1"); + Path subfile1 = new Path(destSubdir1, "subfile1"); + + // this is the directory we want to copy over under the dest dir + Path srcSubdir2 = new Path(srcDir, "subdir2"); + Path srcSubfile = new Path(srcSubdir2, "subfile2"); + Path destSubdir2 = new Path(destDir, "subdir2"); + + // creates subfile1 and all parents, so that + // dest/subdir1/subfile1 exists as a file; + // dest/subdir1 and dest are directories without markers + ContractTestUtils.touch(fs, subfile1); + assertIsDirectory(destDir); + + // source subfile + ContractTestUtils.touch(fs, srcSubfile); + + // this is the FS used for delete() + final BlockingFakeDirMarkerFS blockingFS + = new BlockingFakeDirMarkerFS(); + blockingFS.initialize(fs.getUri(), fs.getConf()); + // get the semaphore; this ensures that the next attempt to create + // a fake marker blocks + blockingFS.blockFakeDirCreation(); + try { + final CompletableFuture future = submit(EXECUTOR, () -> { + LOG.info("deleting {}", destSubdir1); + blockingFS.delete(destSubdir1, true); + return destSubdir1; + }); + + // wait for the blocking FS to return from the DELETE call. + blockingFS.awaitFakeDirCreation(); + + try { + // there is now no destination directory + assertPathDoesNotExist("should have been implicitly deleted", + destDir); + + // attempt the rename in the normal FS. + LOG.info("renaming {} to {}", srcSubdir2, destSubdir2); + Assertions.assertThat(fs.rename(srcSubdir2, destSubdir2)) + .describedAs("rename(%s, %s)", srcSubdir2, destSubdir2) + .isTrue(); + // dest dir implicitly exists. + assertPathExists("must now exist", destDir); + } finally { + // release the remaining semaphore so that the deletion thread exits. + blockingFS.allowFakeDirCreationToProceed(); + } + + // now let the delete complete + LOG.info("Waiting for delete {} to finish", destSubdir1); + waitForCompletion(future); + + // everything still exists + assertPathExists("must now exist", destDir); + assertPathExists("must now exist", new Path(destSubdir2, "subfile2")); + assertPathDoesNotExist("Src dir deleted", srcSubdir2); + + } finally { + cleanupWithLogger(LOG, blockingFS); + } + + } + + /** + * Subclass of S3A FS whose execution of maybeCreateFakeParentDirectory + * can be choreographed with another thread so as to reliably + * create the delete/rename race condition. + * This class is only intended for "single shot" API calls. + */ + private final class BlockingFakeDirMarkerFS extends S3AFileSystem { + + /** + * Block for entry into maybeCreateFakeParentDirectory(); will be released + * then. + */ + private final Semaphore signalCreatingFakeParentDirectory = + new Semaphore(1); + + /** + * Semaphore to acquire before the marker can be listed/created. 
+ */ + private final Semaphore blockBeforeCreatingMarker = new Semaphore(1); + + private BlockingFakeDirMarkerFS() { + signalCreatingFakeParentDirectory.acquireUninterruptibly(); + } + + @Override + protected void maybeCreateFakeParentDirectory(final Path path) + throws IOException, AmazonClientException { + LOG.info("waking anything blocked on the signal semaphore"); + // notify anything waiting + signalCreatingFakeParentDirectory.release(); + // acquire the semaphore and then create any fake directory + LOG.info("blocking for creation"); + blockBeforeCreatingMarker.acquireUninterruptibly(); + try { + LOG.info("probing for/creating markers"); + super.maybeCreateFakeParentDirectory(path); + } finally { + // and release the marker for completeness. + blockBeforeCreatingMarker.release(); + } + } + + /** + * Block until fake dir creation is invoked. + */ + public void blockFakeDirCreation() throws InterruptedException { + blockBeforeCreatingMarker.acquire(); + } + + /** + * wait for the blocking FS to return from the DELETE call. + */ + public void awaitFakeDirCreation() throws InterruptedException { + LOG.info("Blocking until maybeCreateFakeParentDirectory() is reached"); + signalCreatingFakeParentDirectory.acquire(); + } + + public void allowFakeDirCreationToProceed() { + LOG.info("Allowing the fake directory LIST/PUT to proceed."); + blockBeforeCreatingMarker.release(); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java index c2e0c04fbe63a..af4cfba0aa078 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java @@ -134,7 +134,7 @@ public final class OperationCost { public static final OperationCost RENAME_SINGLE_FILE_DIFFERENT_DIR = FILE_STATUS_FILE_PROBE // source file probe .plus(GET_FILE_STATUS_FNFE) // dest does not exist - .plus(FILE_STATUS_DIR_PROBE) // parent dir of dest + .plus(FILE_STATUS_FILE_PROBE) // parent dir of dest is not file .plus(FILE_STATUS_DIR_PROBE) // recreate source parent dir? .plus(COPY_OP); // metadata read on copy diff --git a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml index 6251aab7859c7..a5d98a32e6fc2 100644 --- a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml +++ b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml @@ -48,13 +48,23 @@ - fs.contract.rename-returns-false-if-source-missing + fs.contract.rename-creates-dest-dirs true + + fs.contract.rename-returns-false-if-source-missing + false + + + + fs.contract.rename-overwrites-dest + false + + fs.contract.rename-returns-false-if-dest-exists - true + false From 91e5d3277e4c958f869ff5594eee71215d116cc7 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 11 Mar 2021 21:48:47 +0900 Subject: [PATCH 085/666] HADOOP-17570. Apply YETUS-1102 to re-enable GitHub comments (#2745) Reviewed-by: Ayush Saxena --- dev-support/Jenkinsfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 60d2f7f645b31..f18b3c6e2f0fc 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -35,7 +35,7 @@ pipeline { DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" YETUS='yetus' // Branch or tag name. 
Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='rel/0.13.0' + YETUS_VERSION='11eb9b09786e401fbdeaa3be83a19a4066fd7813' } parameters { @@ -157,6 +157,10 @@ pipeline { # custom javadoc goals YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork") + # write Yetus report as GitHub comment (YETUS-1102) + YETUS_ARGS+=("--github-write-comment") + YETUS_ARGS+=("--github-use-emoji-vote") + "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ''' } From 2b5c554ae81747726505f65239857e9a9cd93063 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 12 Mar 2021 01:21:24 +0530 Subject: [PATCH 086/666] HADOOP-17571 : Bump up woodstox-core to 5.3.0 due to security concerns (#2757) Contributed by Viraj Jasani. Signed-off-by: Mingliang Liu Signed-off-by: Akira Ajisaka --- hadoop-project/pom.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 9e728e1513ebc..c0e8e2ca4c81a 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -209,6 +209,7 @@ 7.7.0 1.0.7.Final 1.0.2 + 5.3.0 @@ -1131,7 +1132,7 @@ com.fasterxml.woodstox woodstox-core - 5.0.3 + ${woodstox.version} org.codehaus.jackson From 7160cd235cba09cff0a858c14b26ee13c35dd085 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Fri, 12 Mar 2021 10:26:46 +0900 Subject: [PATCH 087/666] HADOOP-17514. Remove trace subcommand from hadoop CLI. (#2680) --- hadoop-common-project/hadoop-common/src/main/bin/hadoop | 4 ---- .../hadoop-common/src/main/bin/hadoop.cmd | 7 +------ .../hadoop-common/src/site/markdown/CommandsManual.md | 4 ---- 3 files changed, 1 insertion(+), 14 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop index 7f46e7e2ab2a2..abf3573986a42 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop @@ -44,7 +44,6 @@ function hadoop_usage hadoop_add_subcommand "kerbname" client "show auth_to_local principal conversion" hadoop_add_subcommand "key" client "manage keys via the KeyProvider" hadoop_add_subcommand "registrydns" daemon "run the registry DNS server" - hadoop_add_subcommand "trace" client "view and modify Hadoop tracing settings" hadoop_add_subcommand "version" client "print the version" hadoop_add_subcommand "kdiag" client "Diagnose Kerberos Problems" hadoop_add_subcommand "rbfbalance" client "move directories and files across router-based federation namespaces" @@ -166,9 +165,6 @@ function hadoopcmd_case HADOOP_SECURE_CLASSNAME='org.apache.hadoop.registry.server.dns.PrivilegedRegistryDNSStarter' HADOOP_CLASSNAME='org.apache.hadoop.registry.server.dns.RegistryDNSServer' ;; - trace) - HADOOP_CLASSNAME=org.apache.hadoop.tracing.TraceAdmin - ;; version) HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo ;; diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd index 04e5039d19812..ed25183a9e3c1 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd @@ -149,7 +149,7 @@ call :updatepath %HADOOP_BIN_PATH% exit /b ) - set corecommands=fs version jar checknative conftest distch distcp daemonlog archive classpath credential kerbname key trace kdiag + set corecommands=fs version jar checknative conftest distch distcp daemonlog archive classpath credential kerbname key kdiag for %%i in ( %corecommands% ) do ( if %hadoop-command% == %%i set 
corecommand=true ) @@ -244,10 +244,6 @@ call :updatepath %HADOOP_BIN_PATH% set CLASS=org.apache.hadoop.crypto.key.KeyShell goto :eof -:trace - set CLASS=org.apache.hadoop.tracing.TraceAdmin - goto :eof - :updatepath set path_to_add=%* set current_path_comparable=%path% @@ -318,7 +314,6 @@ call :updatepath %HADOOP_BIN_PATH% @echo kerbname show auth_to_local principal conversion @echo kdiag diagnose kerberos problems @echo key manage keys via the KeyProvider - @echo trace view and modify Hadoop tracing settings @echo daemonlog get/set the log level for each daemon @echo or @echo CLASSNAME run the class named CLASSNAME diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md index 4842d5b86d621..59a73ba7b42c4 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md @@ -240,10 +240,6 @@ Usage: `hadoop kms` Run KMS, the Key Management Server. -### `trace` - -View and modify Hadoop tracing settings. See the [Tracing Guide](./Tracing.html). - ### `version` Usage: `hadoop version` From 657ee13db8e41a55ab09b82e044914a40429bf2d Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 12 Mar 2021 15:42:15 +0900 Subject: [PATCH 088/666] HDFS-15848. Snapshot Operations: Add debug logs at the entry point. Contributed by Bhavik Patel. --- .../hadoop/hdfs/server/namenode/FSDirSnapshotOp.java | 11 ++++++++++- .../hdfs/server/namenode/NameNodeRpcServer.java | 7 +++++++ .../server/namenode/snapshot/SnapshotManager.java | 3 ++- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java index d45c0c30d4907..8925f0075faff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; @@ -43,6 +45,9 @@ import java.util.List; class FSDirSnapshotOp { + public static final Logger LOG = + LoggerFactory.getLogger(FSDirSnapshotOp.class); + /** Verify if the snapshot name is legal. 
*/ static void verifySnapshotName(FSDirectory fsd, String snapshotName, String path) @@ -118,7 +123,7 @@ static String createSnapshot( } fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName, logRetryCache, now); - + LOG.info("Created Snapshot for SnapshotRoot {}", snapshotRoot); return snapshotPath; } @@ -141,6 +146,8 @@ static void renameSnapshot(FSDirectory fsd, FSPermissionChecker pc, } fsd.getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName, logRetryCache, now); + LOG.info("Snapshot renamed from {} to {} for SnapshotRoot {}", + snapshotOldName, snapshotNewName, path); } static SnapshottableDirectoryStatus[] getSnapshottableDirListing( @@ -271,6 +278,8 @@ static INode.BlocksMapUpdateInfo deleteSnapshot( final INode.BlocksMapUpdateInfo collectedBlocks = deleteSnapshot( fsd, snapshotManager, iip, snapshotName, now, snapshotRoot, logRetryCache); + LOG.info("Snapshot {} deleted for SnapshotRoot {}", + snapshotName, snapshotName); return collectedBlocks; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 70b12b3e198ff..cfb0d444051f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1899,6 +1899,8 @@ public DataEncryptionKey getDataEncryptionKey() throws IOException { public String createSnapshot(String snapshotRoot, String snapshotName) throws IOException { checkNNStartup(); + LOG.debug("*DIR* NameNode.createSnapshot: Path {} and SnapshotName {}", + snapshotRoot, snapshotName); if (!checkPathLength(snapshotRoot)) { throw new IOException("createSnapshot: Pathname too long. Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); @@ -1925,6 +1927,8 @@ public String createSnapshot(String snapshotRoot, String snapshotName) public void deleteSnapshot(String snapshotRoot, String snapshotName) throws IOException { checkNNStartup(); + LOG.debug("*DIR* NameNode.deleteSnapshot: Path {} and SnapshotName {}", + snapshotRoot, snapshotName); if (snapshotName == null || snapshotName.isEmpty()) { throw new IOException("The snapshot name is null or empty."); } @@ -1964,6 +1968,9 @@ public void disallowSnapshot(String snapshot) throws IOException { public void renameSnapshot(String snapshotRoot, String snapshotOldName, String snapshotNewName) throws IOException { checkNNStartup(); + LOG.debug("*DIR* NameNode.renameSnapshot: Snapshot Path {}, " + + "snapshotOldName {}, snapshotNewName {}", snapshotRoot, + snapshotOldName, snapshotNewName); if (snapshotNewName == null || snapshotNewName.isEmpty()) { throw new IOException("The new snapshot name is null or empty."); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 16e654c04cdf1..68bf998e0d8f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -457,7 +457,8 @@ public String createSnapshot(final LeaseManager leaseManager, // requests. 
throw new SnapshotException( "Failed to create the snapshot. The FileSystem has run out of " + - "snapshot IDs and ID rollover is not supported."); + "snapshot IDs and ID rollover is not supported " + + "and the max snapshot limit is: " + maxSnapshotLimit); } int n = numSnapshots.get(); checkFileSystemSnapshotLimit(n); From 48ff3b0966bbddec28711f275056c3dc947de0d1 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 12 Mar 2021 18:04:00 +0900 Subject: [PATCH 089/666] HADOOP-17582. Replace GitHub App Token with GitHub OAuth token (#2766) Reviewed-by: Ayush Saxena --- dev-support/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index f18b3c6e2f0fc..d2266c5530bc2 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -60,7 +60,7 @@ pipeline { stage ('precommit-run') { steps { withCredentials( - [usernamePassword(credentialsId: '683f5dcf-5552-4b28-9fb1-6a6b77cf53dd', + [usernamePassword(credentialsId: 'apache-hadoop-at-github.com', passwordVariable: 'GITHUB_TOKEN', usernameVariable: 'GITHUB_USER'), usernamePassword(credentialsId: 'hadoopqa-at-asf-jira', From 94826fb1ab736a68e9448756a6fce1ef09081d89 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 12 Mar 2021 14:10:16 +0100 Subject: [PATCH 090/666] YARN-10412. Move CS placement rule related changes to a separate package. Contributed by Gergely Pollak --- .../placement/CSMappingPlacementRule.java | 5 +- .../placement/VariableContext.java | 2 +- .../{ => csmappingrule}/MappingQueuePath.java | 2 +- .../{ => csmappingrule}/MappingRule.java | 3 +- .../MappingRuleAction.java | 3 +- .../MappingRuleActionBase.java | 4 +- .../MappingRuleActions.java | 3 +- .../MappingRuleMatcher.java | 4 +- .../MappingRuleMatchers.java | 4 +- .../MappingRuleResult.java | 2 +- .../MappingRuleResultType.java | 2 +- .../MappingRuleValidationContext.java | 4 +- .../MappingRuleValidationContextImpl.java | 4 +- .../MappingRuleValidationHelper.java | 2 +- .../CapacitySchedulerConfiguration.java | 148 +++++++++--------- .../placement/MappingRuleCreator.java | 10 +- .../converter/LegacyMappingRuleToJson.java | 3 +- .../scheduler/fair/QueueManager.java | 23 ++- .../placement/MockQueueHierarchyBuilder.java | 2 +- .../TestAppNameMappingPlacementRule.java | 1 + .../TestUserGroupMappingPlacementRule.java | 1 + .../TestCSMappingPlacementRule.java | 7 +- .../{ => csmappingrule}/TestMappingRule.java | 3 +- .../TestMappingRuleActions.java | 3 +- .../TestMappingRuleMatchers.java | 3 +- .../TestMappingRuleValidationContextImpl.java | 3 +- .../TestVariableContext.java | 3 +- .../scheduler/capacity/TestQueueMappings.java | 10 +- .../placement/TestMappingRuleCreator.java | 6 +- .../TestLegacyMappingRuleToJson.java | 2 +- 30 files changed, 144 insertions(+), 128 deletions(-) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingQueuePath.java (97%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRule.java (97%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleAction.java (94%) rename 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleActionBase.java (95%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleActions.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleMatcher.java (87%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleMatchers.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleResult.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleResultType.java (94%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleValidationContext.java (96%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleValidationContextImpl.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/MappingRuleValidationHelper.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/TestCSMappingPlacementRule.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/TestMappingRule.java (96%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/TestMappingRuleActions.java (97%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/TestMappingRuleMatchers.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/TestMappingRuleValidationContextImpl.java (98%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/{ => csmappingrule}/TestVariableContext.java (98%) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java index 821d055ae1b7c..04dbb649372dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java @@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.*; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -74,12 +75,12 @@ public class CSMappingPlacementRule extends PlacementRule { private boolean failOnConfigError = true; @VisibleForTesting - void setGroups(Groups groups) { + public void setGroups(Groups groups) { this.groups = groups; } @VisibleForTesting - void setFailOnConfigError(boolean failOnConfigError) { + public void setFailOnConfigError(boolean failOnConfigError) { this.failOnConfigError = failOnConfigError; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java index 9fc42af9b57a7..0a30e926b3e6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java @@ -55,7 +55,7 @@ public class VariableContext { * @param name Name of the variable to check * @return true if the variable is immutable */ - boolean isImmutable(String name) { + public boolean isImmutable(String name) { return (immutableNames != null && immutableNames.contains(name)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingQueuePath.java similarity index 97% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingQueuePath.java index b1ccf70eda9e9..2a0dfb43fe638 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingQueuePath.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingQueuePath.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRule.java similarity index 97% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRule.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRule.java index 9d67d7815e475..4f05f647af1d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRule.java @@ -16,9 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; /** * Mapping rule represents a single mapping setting defined by the user. 
All diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleAction.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleAction.java similarity index 94% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleAction.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleAction.java index 3b66d30b221e6..36b2c8cf0eaab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleAction.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleAction.java @@ -16,9 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; /** * This interface represents the action part of a MappingRule, action are diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActionBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleActionBase.java similarity index 95% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActionBase.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleActionBase.java index b0c86ea6de9d1..6c93c211bf164 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActionBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleActionBase.java @@ -16,7 +16,9 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; + +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; /** * This class implements the fallback logic for MappingRuleActions, this can diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleActions.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleActions.java index 3f19dcdf4c996..4826062753894 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleActions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleActions.java @@ -16,9 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; /** * This class contains all the actions and some helper methods to generate them. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleMatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatcher.java similarity index 87% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleMatcher.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatcher.java index d2650e9b478bb..c64478c4e825e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleMatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatcher.java @@ -16,7 +16,9 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; + +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; public interface MappingRuleMatcher { /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleMatchers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatchers.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleMatchers.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatchers.java index 24f147b6063dc..9d56e89121c4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleMatchers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleMatchers.java @@ -16,7 +16,9 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; + +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import java.util.Arrays; import java.util.Set; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleResult.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleResult.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java index eb66721e25c66..ed61e64ac637a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleResult.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; /** * This class represents the outcome of an action. 
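The hunks above relocate the mapping rule classes from org.apache.hadoop.yarn.server.resourcemanager.placement into the new placement.csmappingrule sub-package, while VariableContext stays behind in the parent package; apart from the package declarations and a few members widened to public, the classes are unchanged (the rename similarity indexes are 94-98%). A minimal sketch of what the move means for consuming code is shown below; the class ExampleRuleConsumer is hypothetical and only illustrates the import update that later hunks apply to MappingRuleCreator and the relocated tests.

    // Hypothetical consumer, not part of this patch: after YARN-10412 the rule
    // classes are imported from placement.csmappingrule, while VariableContext
    // is still imported from the parent placement package.
    package org.apache.hadoop.example;

    import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext;
    import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule;

    public class ExampleRuleConsumer {
      // Only the package prefix of these types changes with this patch.
      private MappingRule rule;
      private VariableContext variables;

      public void setRule(MappingRule rule) {
        this.rule = rule;
      }

      public void setVariables(VariableContext variables) {
        this.variables = variables;
      }
    }
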
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleResultType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResultType.java similarity index 94% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleResultType.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResultType.java index c4ffa1c15c7b3..d8d33b65c135f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleResultType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResultType.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; public enum MappingRuleResultType { /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContext.java similarity index 96% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationContext.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContext.java index 172ce419f5460..ca939f2747a42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContext.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -30,7 +30,7 @@ * interface should contain all validation related data and functions, this way * schedulers or engines can be changed without changing the MappingRules. */ -interface MappingRuleValidationContext { +public interface MappingRuleValidationContext { /** * This method should determine if the provided queue path can result in * a possible placement. 
It should fail if the provided path cannot be placed diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationContextImpl.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java index 1b768d4cdd2f0..bff6d4f4e0219 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @@ -48,7 +48,7 @@ public class MappingRuleValidationContextImpl */ private final CapacitySchedulerQueueManager queueManager; - MappingRuleValidationContextImpl(CapacitySchedulerQueueManager qm) { + public MappingRuleValidationContextImpl(CapacitySchedulerQueueManager qm) { queueManager = qm; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationHelper.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationHelper.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationHelper.java index d23f73577d436..f4f19e0ae12a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MappingRuleValidationHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationHelper.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 90979dc94dd99..5b2906062fc56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -18,12 +18,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import org.apache.hadoop.ipc.WeightedTimeCostProvider; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRule; -import org.apache.hadoop.yarn.server.resourcemanager.placement.QueuePlacementRuleUtils; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.placement.MappingRuleCreator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,41 +83,41 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur LoggerFactory.getLogger(CapacitySchedulerConfiguration.class); private static final String CS_CONFIGURATION_FILE = "capacity-scheduler.xml"; - + @Private public static final String PREFIX = "yarn.scheduler.capacity."; - + @Private public static final String DOT = "."; - + @Private public static final String MAXIMUM_APPLICATIONS_SUFFIX = "maximum-applications"; - + @Private public static final String MAXIMUM_SYSTEM_APPLICATIONS = PREFIX + MAXIMUM_APPLICATIONS_SUFFIX; - + @Private public static final String MAXIMUM_AM_RESOURCE_SUFFIX = "maximum-am-resource-percent"; - + @Private public static final String MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT = PREFIX + MAXIMUM_AM_RESOURCE_SUFFIX; @Private public static final String QUEUES = "queues"; - + @Private public static final String CAPACITY = "capacity"; - + @Private public static final String MAXIMUM_CAPACITY = "maximum-capacity"; - + @Private public static final String USER_LIMIT = "minimum-user-limit-percent"; - + @Private public static final String USER_LIMIT_FACTOR = "user-limit-factor"; @@ -134,17 +132,17 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur @Private public static final String STATE = "state"; - + @Private public static final String ACCESSIBLE_NODE_LABELS = "accessible-node-labels"; - + @Private public static final String DEFAULT_NODE_LABEL_EXPRESSION = "default-node-label-expression"; public static final String RESERVE_CONT_LOOK_ALL_NODES = PREFIX + 
"reservations-continue-look-all-nodes"; - + @Private public static final boolean DEFAULT_RESERVE_CONT_LOOK_ALL_NODES = true; @@ -177,29 +175,29 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur public static final String DEFAULT_APP_ORDERING_POLICY = FIFO_APP_ORDERING_POLICY; - + @Private public static final int DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS = 10000; - + @Private - public static final float + public static final float DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT = 0.1f; @Private public static final float UNDEFINED = -1; - + @Private public static final float MINIMUM_CAPACITY_VALUE = 0; - + @Private public static final float MAXIMUM_CAPACITY_VALUE = 100; - + @Private public static final float DEFAULT_MAXIMUM_CAPACITY_VALUE = -1.0f; - + @Private public static final int DEFAULT_USER_LIMIT = 100; - + @Private public static final float DEFAULT_USER_LIMIT_FACTOR = 1.0f; @@ -217,17 +215,17 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur @Private public static final String RESOURCE_CALCULATOR_CLASS = PREFIX + "resource-calculator"; - @Private public static final Class + @Private public static final Class DEFAULT_RESOURCE_CALCULATOR_CLASS = DefaultResourceCalculator.class; - + @Private public static final String ROOT = "root"; - @Private - public static final String NODE_LOCALITY_DELAY = + @Private + public static final String NODE_LOCALITY_DELAY = PREFIX + "node-locality-delay"; - @Private + @Private public static final int DEFAULT_NODE_LOCALITY_DELAY = 40; @Private @@ -312,7 +310,7 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur @Private public static final Integer DEFAULT_CONFIGURATION_APPLICATION_PRIORITY = 0; - + @Private public static final String AVERAGE_CAPACITY = "average-capacity"; @@ -421,7 +419,7 @@ public enum AbsoluteResourceType { public CapacitySchedulerConfiguration() { this(new Configuration()); } - + public CapacitySchedulerConfiguration(Configuration configuration) { this(configuration, true); } @@ -454,15 +452,15 @@ private String getNodeLabelPrefix(String queue, String label) { } return getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS + DOT + label + DOT; } - + public int getMaximumSystemApplications() { - int maxApplications = + int maxApplications = getInt(MAXIMUM_SYSTEM_APPLICATIONS, DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS); return maxApplications; } - + public float getMaximumApplicationMasterResourcePercent() { - return getFloat(MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, + return getFloat(MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT); } @@ -473,23 +471,23 @@ public float getMaximumApplicationMasterResourcePercent() { * @return setting specified or -1 if not set */ public int getMaximumApplicationsPerQueue(String queue) { - int maxApplicationsPerQueue = - getInt(getQueuePrefix(queue) + MAXIMUM_APPLICATIONS_SUFFIX, - (int)UNDEFINED); + int maxApplicationsPerQueue = + getInt(getQueuePrefix(queue) + MAXIMUM_APPLICATIONS_SUFFIX, + (int)UNDEFINED); return maxApplicationsPerQueue; } /** * Get the maximum am resource percent per queue setting. 
* @param queue name of the queue - * @return per queue setting or defaults to the global am-resource-percent + * @return per queue setting or defaults to the global am-resource-percent * setting if per queue setting not present */ public float getMaximumApplicationMasterResourcePerQueuePercent(String queue) { - return getFloat(getQueuePrefix(queue) + MAXIMUM_AM_RESOURCE_SUFFIX, + return getFloat(getQueuePrefix(queue) + MAXIMUM_AM_RESOURCE_SUFFIX, getMaximumApplicationMasterResourcePercent()); } - + public void setMaximumApplicationMasterResourcePerQueuePercent(String queue, float percent) { setFloat(getQueuePrefix(queue) + MAXIMUM_AM_RESOURCE_SUFFIX, percent); @@ -555,7 +553,7 @@ public float getNonLabeledQueueCapacity(String queue) { return capacity; } - + public void setCapacity(String queue, float capacity) { if (queue.equals("root")) { throw new IllegalArgumentException( @@ -599,7 +597,7 @@ public float getNonLabeledQueueMaximumCapacity(String queue) { : maxCapacity; return maxCapacity; } - + public void setMaximumCapacity(String queue, float maxCapacity) { if (maxCapacity > MAXIMUM_CAPACITY_VALUE) { throw new IllegalArgumentException("Illegal " + @@ -609,7 +607,7 @@ public void setMaximumCapacity(String queue, float maxCapacity) { LOG.debug("CSConf - setMaxCapacity: queuePrefix={}, maxCapacity={}", getQueuePrefix(queue), maxCapacity); } - + public void setCapacityByLabel(String queue, String label, float capacity) { setFloat(getNodeLabelPrefix(queue, label) + CAPACITY, capacity); } @@ -630,7 +628,7 @@ public void setMaximumCapacityByLabel(String queue, String label, set(getNodeLabelPrefix(queue, label) + MAXIMUM_CAPACITY, absoluteResourceCapacity); } - + public int getUserLimit(String queue) { int userLimit = getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT); @@ -643,12 +641,12 @@ public int getUserLimit(String queue) { @SuppressWarnings("unchecked") public OrderingPolicy getAppOrderingPolicy( String queue) { - + String policyType = get(getQueuePrefix(queue) + ORDERING_POLICY, DEFAULT_APP_ORDERING_POLICY); - + OrderingPolicy orderingPolicy; - + if (policyType.trim().equals(FIFO_APP_ORDERING_POLICY)) { policyType = FifoOrderingPolicy.class.getName(); } @@ -686,18 +684,18 @@ public void setUserLimit(String queue, int userLimit) { LOG.debug("here setUserLimit: queuePrefix={}, userLimit={}", getQueuePrefix(queue), getUserLimit(queue)); } - + public float getUserLimitFactor(String queue) { - float userLimitFactor = - getFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, - DEFAULT_USER_LIMIT_FACTOR); + float userLimitFactor = + getFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, + DEFAULT_USER_LIMIT_FACTOR); return userLimitFactor; } public void setUserLimitFactor(String queue, float userLimitFactor) { - setFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, userLimitFactor); + setFloat(getQueuePrefix(queue) + USER_LIMIT_FACTOR, userLimitFactor); } - + public QueueState getConfiguredState(String queue) { String state = get(getQueuePrefix(queue) + STATE); if (state == null) { @@ -725,12 +723,12 @@ public void setAccessibleNodeLabels(String queue, Set labels) { String str = StringUtils.join(",", labels); set(getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS, str); } - + public Set getAccessibleNodeLabels(String queue) { String accessibleLabelStr = get(getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS); - // When accessible-label is null, + // When accessible-label is null, if (accessibleLabelStr == null) { // Only return null when queue is not ROOT if (!queue.equals(ROOT)) { @@ -757,7 
+755,7 @@ public Set getAccessibleNodeLabels(String queue) { set.add(str.trim()); } } - + // if labels contains "*", only keep ANY behind if (set.contains(RMNodeLabelsManager.ANY)) { set.clear(); @@ -813,15 +811,15 @@ private float internalGetLabeledQueueCapacity(String queue, String label, } return capacity; } - + public float getLabeledQueueCapacity(String queue, String label) { return internalGetLabeledQueueCapacity(queue, label, CAPACITY, 0f); } - + public float getLabeledQueueMaximumCapacity(String queue, String label) { return internalGetLabeledQueueCapacity(queue, label, MAXIMUM_CAPACITY, 100f); } - + public String getDefaultNodeLabelExpression(String queue) { String defaultLabelExpression = get(getQueuePrefix(queue) + DEFAULT_NODE_LABEL_EXPRESSION); @@ -830,7 +828,7 @@ public String getDefaultNodeLabelExpression(String queue) { } return defaultLabelExpression.trim(); } - + public void setDefaultNodeLabelExpression(String queue, String exp) { set(getQueuePrefix(queue) + DEFAULT_NODE_LABEL_EXPRESSION, exp); } @@ -860,7 +858,7 @@ public boolean getReservationContinueLook() { return getBoolean(RESERVE_CONT_LOOK_ALL_NODES, DEFAULT_RESERVE_CONT_LOOK_ALL_NODES); } - + private static String getAclKey(QueueACL acl) { return "acl_" + StringUtils.toLowerCase(acl.toString()); } @@ -987,13 +985,13 @@ public String[] getQueues(String queue) { return queues; } - + public void setQueues(String queue, String[] subQueues) { set(getQueuePrefix(queue) + QUEUES, StringUtils.arrayToString(subQueues)); LOG.debug("CSConf - setQueues: qPrefix={}, queues={}", getQueuePrefix(queue), StringUtils.arrayToString(subQueues)); } - + public Resource getMinimumAllocation() { int minimumMemory = getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, @@ -1087,9 +1085,9 @@ public boolean getRackLocalityFullReset() { public ResourceCalculator getResourceCalculator() { return ReflectionUtils.newInstance( getClass( - RESOURCE_CALCULATOR_CLASS, - DEFAULT_RESOURCE_CALCULATOR_CLASS, - ResourceCalculator.class), + RESOURCE_CALCULATOR_CLASS, + DEFAULT_RESOURCE_CALCULATOR_CLASS, + ResourceCalculator.class), this); } @@ -1101,8 +1099,8 @@ public boolean getUsePortForNodeName() { public void setResourceComparator( Class resourceCalculatorClass) { setClass( - RESOURCE_CALCULATOR_CLASS, - resourceCalculatorClass, + RESOURCE_CALCULATOR_CLASS, + resourceCalculatorClass, ResourceCalculator.class); } @@ -1488,18 +1486,18 @@ public long getEnforcementWindow(String queue) { * Sets the disable_preemption property in order to indicate * whether or not container preemption will be disabled for the specified * queue. - * + * * @param queue queue path * @param preemptionDisabled true if preemption is disabled on queue */ public void setPreemptionDisabled(String queue, boolean preemptionDisabled) { setBoolean(getQueuePrefix(queue) + QUEUE_PREEMPTION_DISABLED, - preemptionDisabled); + preemptionDisabled); } /** * Indicates whether preemption is disabled on the specified queue. 
- * + * * @param queue queue path to query * @param defaultVal used as default if the disable_preemption * is not set in the configuration @@ -1533,7 +1531,7 @@ public boolean getIntraQueuePreemptionDisabled(String queue, public Set getConfiguredNodeLabels(String queuePath) { Set configuredNodeLabels = new HashSet(); Entry e = null; - + Iterator> iter = iterator(); while (iter.hasNext()) { e = iter.next(); @@ -1551,10 +1549,10 @@ public Set getConfiguredNodeLabels(String queuePath) { configuredNodeLabels.add(labelName); } } - + // always add NO_LABEL configuredNodeLabels.add(RMNodeLabelsManager.NO_LABEL); - + return configuredNodeLabels; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/MappingRuleCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/MappingRuleCreator.java index 11eb5bff679be..1c6f324cf07ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/MappingRuleCreator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/MappingRuleCreator.java @@ -27,11 +27,11 @@ import java.util.List; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRule; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRuleAction; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRuleActions; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRuleMatcher; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRuleMatchers; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleAction; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleActions; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleMatcher; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleMatchers; // These are generated classes - use GeneratePojos class to create them // if they are missing diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/LegacyMappingRuleToJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/LegacyMappingRuleToJson.java index 6aa404992afcb..5ae5832d9b403 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/LegacyMappingRuleToJson.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/LegacyMappingRuleToJson.java @@ -22,11 +22,10 @@ import 
com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingQueuePath; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingQueuePath; import java.util.ArrayList; import java.util.Collection; -import java.util.HashSet; public class LegacyMappingRuleToJson { //Legacy rule parse helper constants diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 5c2664483fcf4..d620077ca0198 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -18,6 +18,17 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -29,18 +40,6 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; - /** * Maintains a list of queues as well as scheduling parameters for each queue, * such as guaranteed share allocations, from the fair scheduler config file. 
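Several hunks in this commit widen visibility rather than move code: setGroups and setFailOnConfigError on CSMappingPlacementRule, isImmutable on VariableContext, the MappingRuleValidationContextImpl constructor, and (in the next hunk) MockQueueHierarchyBuilder all change from package-private to public. That follows from a basic Java rule: package-private members are visible only within exactly the same package, and sub-packages such as placement.csmappingrule get no special access. A short illustration with hypothetical class names that are not part of the patch:

    // File 1: a package-private helper left behind in the parent package.
    package org.apache.hadoop.yarn.server.resourcemanager.placement;

    class PackagePrivateHelper {   // no modifier: package-private
      void doWork() {
        // ...
      }
    }

    // File 2: a class relocated into the csmappingrule sub-package.
    package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule;

    public class RelocatedCaller {
      public void run() {
        // The line below would not compile from this package, because
        // PackagePrivateHelper is package-private in the parent package;
        // widening such members to public, as this patch does for the classes
        // named above, is what keeps the relocated classes and tests compiling.
        // new org.apache.hadoop.yarn.server.resourcemanager.placement
        //     .PackagePrivateHelper().doWork();
      }
    }
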
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java index f7c8a99ce38d4..b268e5acdb505 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java @@ -36,7 +36,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -class MockQueueHierarchyBuilder { +public class MockQueueHierarchyBuilder { private static final String ROOT = "root"; private static final String QUEUE_SEP = "."; private List queuePaths = Lists.newArrayList(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java index 9bcac9ace4304..3665dd037ec04 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java @@ -24,6 +24,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java index d93496ba019e2..84a3e6c34aa3c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.MappingType; import 
org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; import org.apache.hadoop.yarn.server.resourcemanager.placement.TestUserGroupMappingPlacementRule.QueueMappingTestData.QueueMappingTestDataBuilder; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java index f0b19c45d0ce9..c3cd6dfdc6883 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestCSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; @@ -24,6 +24,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.ApplicationPlacementContext; +import org.apache.hadoop.yarn.server.resourcemanager.placement.CSMappingPlacementRule; +import org.apache.hadoop.yarn.server.resourcemanager.placement.MockQueueHierarchyBuilder; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; @@ -48,12 +51,12 @@ import static junit.framework.TestCase.assertNull; import static junit.framework.TestCase.assertTrue; import static junit.framework.TestCase.fail; -import static org.apache.hadoop.yarn.server.resourcemanager.placement.FairQueuePlacementUtils.DOT; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class TestCSMappingPlacementRule { + public static final String DOT = "."; private static final Logger LOG = LoggerFactory .getLogger(TestCSMappingPlacementRule.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java similarity index 96% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRule.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java index 1b734f5bb6fe0..eb0441ca963d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java @@ -16,13 +16,14 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.junit.Test; public class TestMappingRule { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleActions.java similarity index 97% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleActions.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleActions.java index 4d4daa135693b..9224e673325dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleActions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleActions.java @@ -16,7 +16,8 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.junit.Test; import static org.junit.Assert.*; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleMatchers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleMatchers.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java index e66dd915f841d..3e06863d26675 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleMatchers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java @@ -16,10 +16,11 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import junit.framework.TestCase; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.junit.Test; import java.util.HashMap; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleValidationContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleValidationContextImpl.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleValidationContextImpl.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleValidationContextImpl.java index 383f70eed651c..a015e315c35a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestMappingRuleValidationContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleValidationContextImpl.java @@ -16,9 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.placement.MockQueueHierarchyBuilder; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestVariableContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestVariableContext.java similarity index 98% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestVariableContext.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestVariableContext.java index 07872a873b11e..30d265b9e8c85 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestVariableContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestVariableContext.java @@ -16,9 +16,10 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.resourcemanager.placement; +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.junit.Test; import java.util.HashMap; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java index dcd0fe0bc38dc..695ceb782de88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRule; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,10 +42,10 @@ public class TestQueueMappings { CapacitySchedulerConfiguration.ROOT + "." + Q1; private final static String Q2_PATH = CapacitySchedulerConfiguration.ROOT + "." + Q2; - + private CapacityScheduler cs; private YarnConfiguration conf; - + @Before public void setup() { CapacitySchedulerConfiguration csConf = @@ -70,7 +70,7 @@ private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) { LOG.info("Setup top-level queues q1 and q2"); } - + @Test public void testQueueMappingSpecifyingNotExistedQueue() { // if the mapping specifies a queue that does not exist, reinitialize will @@ -85,7 +85,7 @@ public void testQueueMappingSpecifyingNotExistedQueue() { } Assert.assertTrue("queue initialization failed for non-existent q", fail); } - + @Test public void testQueueMappingTrimSpaces() throws IOException { // space trimming diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/TestMappingRuleCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/TestMappingRuleCreator.java index 778adcbce3ae1..06f2bc6bc207e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/TestMappingRuleCreator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/TestMappingRuleCreator.java @@ -23,9 +23,9 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRule; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRuleResult; -import 
org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRuleResultType; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleResult; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleResultType; import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.placement.schema.MappingRulesDescription; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.placement.schema.Rule; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/TestLegacyMappingRuleToJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/TestLegacyMappingRuleToJson.java index 9a2b97f58a7d8..5d0c12e0827ba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/TestLegacyMappingRuleToJson.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/placement/converter/TestLegacyMappingRuleToJson.java @@ -19,7 +19,7 @@ import static org.junit.Assert.*; -import org.apache.hadoop.yarn.server.resourcemanager.placement.MappingRule; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRule; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.junit.Test; From 2480e1bdc3f1ca9e65b15dad085566c755ca5e81 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Fri, 12 Mar 2021 17:17:31 +0000 Subject: [PATCH 091/666] [YARN-10687] Add option to disable/enable free disk space checking and percentage checking for full and not-full disks. Contributed by Qi Zhu. --- .../hadoop/yarn/conf/YarnConfiguration.java | 27 +++++ .../src/main/resources/yarn-default.xml | 20 +++- .../nodemanager/DirectoryCollection.java | 43 +++++++- .../nodemanager/TestDirectoryCollection.java | 99 +++++++++++++++++++ 4 files changed, 185 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index a8a87ad8c9ce8..1888ffb64c045 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2027,6 +2027,8 @@ public static boolean isAclEnabled(Configuration conf) { * marked as offline. Values can range from 0.0 to 100.0. If the value is * greater than or equal to 100, NM will check for full disk. This applies to * nm-local-dirs and nm-log-dirs. + * + * This applies when disk-utilization-threshold.enabled is true. 
*/ public static final String NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE = NM_DISK_HEALTH_CHECK_PREFIX + "max-disk-utilization-per-disk-percentage"; @@ -2036,6 +2038,17 @@ public static boolean isAclEnabled(Configuration conf) { public static final float DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE = 90.0F; + /** + * Enable/Disable the disk utilisation percentage + * threshold for disk health checker. + */ + public static final String NM_DISK_UTILIZATION_THRESHOLD_ENABLED = + NM_DISK_HEALTH_CHECK_PREFIX + + "disk-utilization-threshold.enabled"; + + public static final + boolean DEFAULT_NM_DISK_UTILIZATION_THRESHOLD_ENABLED = true; + /** * The low threshold percentage of disk space used when an offline disk is * marked as online. Values can range from 0.0 to 100.0. The value shouldn't @@ -2051,9 +2064,23 @@ public static boolean isAclEnabled(Configuration conf) { /** * The minimum space that must be available on a local dir for it to be used. * This applies to nm-local-dirs and nm-log-dirs. + * + * This applies when disk-free-space-threshold.enabled is true. */ public static final String NM_MIN_PER_DISK_FREE_SPACE_MB = NM_DISK_HEALTH_CHECK_PREFIX + "min-free-space-per-disk-mb"; + + /** + * Enable/Disable the minimum disk free + * space threshold for disk health checker. + */ + public static final String NM_DISK_FREE_SPACE_THRESHOLD_ENABLED = + NM_DISK_HEALTH_CHECK_PREFIX + + "disk-free-space-threshold.enabled"; + + public static final boolean + DEFAULT_NM_DISK_FREE_SPACE_THRESHOLD_ENABLED = true; + /** * The minimum space that must be available on an offline * disk for it to be marked as online. The value should not be less diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index f7d9fc1d2b070..12fc32fb9ca50 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -1809,12 +1809,27 @@ 0.25 + + Enable/Disable the disk utilisation percentage + threshold for disk health checker. + yarn.nodemanager.disk-health-checker.disk-utilization-threshold.enabled + true + + + + Enable/Disable the minimum disk free + space threshold for disk health checker. + yarn.nodemanager.disk-health-checker.disk-free-space-threshold.enabled + true + + The maximum percentage of disk space utilization allowed after which a disk is marked as bad. Values can range from 0.0 to 100.0. If the value is greater than or equal to 100, the nodemanager will check for full disk. This applies to yarn.nodemanager.local-dirs and - yarn.nodemanager.log-dirs. + yarn.nodemanager.log-dirs when + yarn.nodemanager.disk-health-checker.disk-utilization-threshold.enabled is true. yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage 90.0 @@ -1834,7 +1849,8 @@ The minimum space in megabytes that must be available on a disk for it to be used. If space on a disk falls below this threshold, it will be marked as bad. This applies to yarn.nodemanager.local-dirs and - yarn.nodemanager.log-dirs. + yarn.nodemanager.log-dirs when + yarn.nodemanager.disk-health-checker.disk-free-space-threshold.enabled is true. 
yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb 0 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java index 5b32e0e2d845c..27bdea7b7e460 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java @@ -59,6 +59,9 @@ public class DirectoryCollection { private final Configuration conf; private final DiskValidator diskValidator; + + private boolean diskUtilizationThresholdEnabled; + private boolean diskFreeSpaceThresholdEnabled; /** * The enum defines disk failure type. */ @@ -239,6 +242,17 @@ public DirectoryCollection(String[] dirs, throw new YarnRuntimeException(e); } + diskUtilizationThresholdEnabled = conf. + getBoolean(YarnConfiguration. + NM_DISK_UTILIZATION_THRESHOLD_ENABLED, + YarnConfiguration. + DEFAULT_NM_DISK_UTILIZATION_THRESHOLD_ENABLED); + diskFreeSpaceThresholdEnabled = conf. + getBoolean(YarnConfiguration. + NM_DISK_FREE_SPACE_THRESHOLD_ENABLED, + YarnConfiguration. + DEFAULT_NM_DISK_FREE_SPACE_THRESHOLD_ENABLED); + localDirs = new ArrayList<>(Arrays.asList(dirs)); errorDirs = new ArrayList<>(); fullDirs = new ArrayList<>(); @@ -520,7 +534,9 @@ Map testDirs(List dirs, diskUtilizationPercentageCutoffHigh : diskUtilizationPercentageCutoffLow; long diskFreeSpaceCutoff = goodDirs.contains(dir) ? 
diskFreeSpaceCutoffLow : diskFreeSpaceCutoffHigh; - if (isDiskUsageOverPercentageLimit(testDir, + + if (diskUtilizationThresholdEnabled + && isDiskUsageOverPercentageLimit(testDir, diskUtilizationPercentageCutoff)) { msg = "used space above threshold of " @@ -529,7 +545,8 @@ Map testDirs(List dirs, ret.put(dir, new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg)); continue; - } else if (isDiskFreeSpaceUnderLimit(testDir, diskFreeSpaceCutoff)) { + } else if (diskFreeSpaceThresholdEnabled + && isDiskFreeSpaceUnderLimit(testDir, diskFreeSpaceCutoff)) { msg = "free space below limit of " + diskFreeSpaceCutoff + "MB"; @@ -613,6 +630,28 @@ long getDiskUtilizationSpaceCutoffHigh() { return diskFreeSpaceCutoffHigh; } + @VisibleForTesting + boolean getDiskUtilizationThresholdEnabled() { + return diskUtilizationThresholdEnabled; + } + + @VisibleForTesting + boolean getDiskFreeSpaceThresholdEnabled() { + return diskFreeSpaceThresholdEnabled; + } + + @VisibleForTesting + void setDiskUtilizationThresholdEnabled(boolean + utilizationEnabled) { + diskUtilizationThresholdEnabled = utilizationEnabled; + } + + @VisibleForTesting + void setDiskFreeSpaceThresholdEnabled(boolean + freeSpaceEnabled) { + diskFreeSpaceThresholdEnabled = freeSpaceEnabled; + } + public void setDiskUtilizationSpaceCutoff(long freeSpaceCutoff) { setDiskUtilizationSpaceCutoff(freeSpaceCutoff, freeSpaceCutoff); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java index b99c7d625ee7e..59a30370283de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java @@ -176,6 +176,105 @@ public void testDiskSpaceUtilizationLimit() throws IOException { dc.getGoodDirsDiskUtilizationPercentage()); } + @Test + public void testDiskSpaceUtilizationThresholdEnabled() throws IOException { + + String dirA = new File(testDir, "dirA").getPath(); + String[] dirs = {dirA}; + DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F); + + // Disable disk utilization threshold. + dc.setDiskUtilizationThresholdEnabled(false); + Assert.assertFalse(dc.getDiskUtilizationThresholdEnabled()); + + dc.checkDirs(); + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + // Enable disk utilization threshold. 
+ dc.setDiskUtilizationThresholdEnabled(true); + Assert.assertTrue(dc.getDiskUtilizationThresholdEnabled()); + + dc.checkDirs(); + Assert.assertEquals(0, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(1, dc.getFailedDirs().size()); + Assert.assertEquals(1, dc.getFullDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA)); + Assert.assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL, + dc.getDirectoryErrorInfo(dirA).cause); + + // no good dirs + Assert.assertEquals(0, + dc.getGoodDirsDiskUtilizationPercentage()); + + dc = new DirectoryCollection(dirs, 100.0F); + int utilizedSpacePerc = + (int) ((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 / + testDir.getTotalSpace()); + dc.checkDirs(); + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + Assert.assertEquals(utilizedSpacePerc, + dc.getGoodDirsDiskUtilizationPercentage()); + + dc = new DirectoryCollection(dirs, + testDir.getTotalSpace() / (1024 * 1024)); + + // Disable disk utilization threshold. + dc.setDiskUtilizationThresholdEnabled(false); + Assert.assertFalse(dc.getDiskUtilizationThresholdEnabled()); + + // Disable disk free space threshold. + dc.setDiskFreeSpaceThresholdEnabled(false); + Assert.assertFalse(dc.getDiskFreeSpaceThresholdEnabled()); + dc.checkDirs(); + + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + dc = new DirectoryCollection(dirs, + testDir.getTotalSpace() / (1024 * 1024)); + + // Enable disk free space threshold. + dc.setDiskFreeSpaceThresholdEnabled(true); + Assert.assertTrue(dc.getDiskFreeSpaceThresholdEnabled()); + + dc.checkDirs(); + + Assert.assertEquals(0, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(1, dc.getFailedDirs().size()); + Assert.assertEquals(1, dc.getFullDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA)); + // no good dirs + Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage()); + + dc = new DirectoryCollection(dirs, 100.0F, 100.0F, 0); + utilizedSpacePerc = + (int)((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 / + testDir.getTotalSpace()); + dc.checkDirs(); + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + Assert.assertEquals(utilizedSpacePerc, + dc.getGoodDirsDiskUtilizationPercentage()); + } + @Test public void testDiskLimitsCutoffSetters() throws IOException { From 44df69eea7f1da11634b50746dd37cbe0f0cb9da Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Fri, 12 Mar 2021 11:27:53 -0800 Subject: [PATCH 092/666] HADOOP-17581. Fix reference to LOG is ambiguous after HADOOP-17482. 
(#2764) --- .../test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 3 +-- .../org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 030f56aad8adc..4404c962b81f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -1421,8 +1421,7 @@ public void testDelegationToken() throws Exception { Credentials creds = new Credentials(); final Token tokens[] = dfs.addDelegationTokens("JobTracker", creds); - DistributedFileSystem.LOG.debug("Delegation tokens: " + - Arrays.asList(tokens)); + LOG.debug("Delegation tokens: " + Arrays.asList(tokens)); Assert.assertEquals(2, tokens.length); Assert.assertEquals(tokens[1], testToken); Assert.assertEquals(2, creds.numberOfTokens()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java index f65b6b13dc554..d29db2bffc3c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java @@ -99,8 +99,7 @@ public void testDelegationToken() throws Exception { Credentials creds = new Credentials(); Token tokens[] = fs.addDelegationTokens(renewer, creds); - DistributedFileSystem.LOG.debug("Delegation tokens: " + - Arrays.asList(tokens)); + LOG.debug("Delegation tokens: " + Arrays.asList(tokens)); Assert.assertEquals(2, tokens.length); Assert.assertEquals(2, creds.numberOfTokens()); From b5ffc0d01c2f91ef0344009505d61154eaae2475 Mon Sep 17 00:00:00 2001 From: lamberken <2217232293@qq.com> Date: Sat, 13 Mar 2021 14:22:11 +0800 Subject: [PATCH 093/666] HDFS-15882. Fix incorrectly initializing RandomAccessFile based on configuration options (#2751). Contributed by Xie Lei. Reviewed-by: He Xiaoqiao --- .../hadoop/hdfs/server/namenode/EditLogFileOutputStream.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index 4919ea44e0f72..493a542d1b2dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -84,9 +84,9 @@ public EditLogFileOutputStream(Configuration conf, File name, int size) doubleBuf = new EditsDoubleBuffer(size); RandomAccessFile rp; if (shouldSyncWritesAndSkipFsync) { - rp = new RandomAccessFile(name, "rws"); - } else { rp = new RandomAccessFile(name, "rw"); + } else { + rp = new RandomAccessFile(name, "rws"); } fp = new FileOutputStream(rp.getFD()); // open for append fc = rp.getChannel(); From afbd79f7d34614135f08e4d0461fcfecb8f3bdbf Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 13 Mar 2021 15:09:03 +0530 Subject: [PATCH 094/666] HADOOP-17586. 
Upgrade org.codehaus.woodstox:stax2-api to 4.2.1. (#2769). Contributed by Ayush Saxena. Signed-off-by: Mingliang Liu --- LICENSE-binary | 4 ++-- hadoop-project/pom.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LICENSE-binary b/LICENSE-binary index 9053b60e13786..198f97c584393 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -225,7 +225,7 @@ com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.9.9 com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.9.9 com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.9.9 com.fasterxml.uuid:java-uuid-generator:3.1.4 -com.fasterxml.woodstox:woodstox-core:5.0.3 +com.fasterxml.woodstox:woodstox-core:5.3.0 com.github.davidmoten:rxjava-extras:0.8.0.17 com.github.stephenc.jcip:jcip-annotations:1.0-1 com.google:guice:4.0 @@ -366,7 +366,7 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanage com.github.luben:zstd-jni:1.4.3-1 dnsjava:dnsjava:2.1.7 -org.codehaus.woodstox:stax2-api:3.1.4 +org.codehaus.woodstox:stax2-api:4.2.1 BSD 3-Clause diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c0e8e2ca4c81a..6a0813f906830 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1127,7 +1127,7 @@ org.codehaus.woodstox stax2-api - 3.1.4 + 4.2.1 com.fasterxml.woodstox From 91bc0b0a669eea10f70cdfb6d5ff64945d64f8ce Mon Sep 17 00:00:00 2001 From: litao Date: Sun, 14 Mar 2021 06:37:31 +0800 Subject: [PATCH 095/666] HDFS-15884. RBF: Remove unused method getCreateLocation in RouterRpcServer (#2754). Contributed by tomscut. --- .../server/federation/router/RouterRpcServer.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 1d0800e4bd833..6bf159f7886a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -776,20 +776,6 @@ public HdfsFileStatus create(String src, FsPermission masked, replication, blockSize, supportedVersions, ecPolicyName, storagePolicy); } - - /** - * Get the location to create a file. It checks if the file already existed - * in one of the locations. - * - * @param src Path of the file to check. - * @return The remote location for this file. - * @throws IOException If the file has no creation location. - */ - RemoteLocation getCreateLocation(final String src) throws IOException { - final List locations = getLocationsForPath(src, true); - return getCreateLocation(src, locations); - } - /** * Get the location to create a file. It checks if the file already existed * in one of the locations. From c5642659b8b9f808dbd51c4470e1fba540216c7b Mon Sep 17 00:00:00 2001 From: sumangala-patki <70206833+sumangala-patki@users.noreply.github.com> Date: Sun, 14 Mar 2021 13:35:02 +0530 Subject: [PATCH 096/666] HADOOP-17548. ABFS: Toggle Store Mkdirs request overwrite parameter (#2729) Contributed by Sumangala Patki. 
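For context, a minimal client-side sketch of the toggle this patch introduces. The class name, account URI, container and paths below are placeholders of mine, not taken from the change; only the configuration key fs.azure.enable.mkdir.overwrite and the repeated-mkdirs behaviour come from the patch and its new tests.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MkdirOverwriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // New toggle added by this patch; the default stays true.
        conf.setBoolean("fs.azure.enable.mkdir.overwrite", false);
        FileSystem fs = FileSystem.get(
            new URI("abfs://container@account.dfs.core.windows.net/"), conf);
        Path dir = new Path("/data/landing");
        fs.mkdirs(dir);  // creates the directory
        fs.mkdirs(dir);  // still returns true; on an HNS account with the
                         // toggle off the existing directory is left as is
      }
    }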
--- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 9 +++++ .../fs/azurebfs/AzureBlobFileSystem.java | 2 +- .../fs/azurebfs/AzureBlobFileSystemStore.java | 5 ++- .../azurebfs/constants/ConfigurationKeys.java | 1 + .../constants/FileSystemConfigurations.java | 1 + .../constants/HttpHeaderConfigurations.java | 1 + .../fs/azurebfs/services/AbfsClient.java | 13 +++++- .../ITestAzureBlobFileSystemMkDir.java | 40 ++++++++++++++++++- 8 files changed, 67 insertions(+), 5 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 193be48029a34..f36cc7d5bfde0 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -199,6 +199,11 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE) private boolean enableConditionalCreateOverwrite; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = + FS_AZURE_ENABLE_MKDIR_OVERWRITE, DefaultValue = + DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE) + private boolean mkdirOverwrite; + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_APPEND_BLOB_KEY, DefaultValue = DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES) private String azureAppendBlobDirs; @@ -621,6 +626,10 @@ public boolean isConditionalCreateOverwriteEnabled() { return this.enableConditionalCreateOverwrite; } + public boolean isEnabledMkdirOverwrite() { + return mkdirOverwrite; + } + public String getAppendBlobDirs() { return this.azureAppendBlobDirs; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index ed607b38e6bfc..d8a2ed7bcd542 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -473,7 +473,7 @@ public boolean mkdirs(final Path f, final FsPermission permission) throws IOExce statIncrement(DIRECTORIES_CREATED); return true; } catch (AzureBlobFileSystemException ex) { - checkException(f, ex, AzureServiceErrorCode.PATH_ALREADY_EXISTS); + checkException(f, ex); return true; } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 678f0b4f20953..75419c26dd214 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -601,7 +601,10 @@ public void createDirectory(final Path path, final FsPermission permission, fina umask, isNamespaceEnabled); - final AbfsRestOperation op = client.createPath(getRelativePath(path), false, true, + boolean overwrite = + !isNamespaceEnabled || abfsConfiguration.isEnabledMkdirOverwrite(); + final AbfsRestOperation op = client.createPath(getRelativePath(path), + false, overwrite, isNamespaceEnabled ? getOctalNotation(permission) : null, isNamespaceEnabled ? 
getOctalNotation(umask) : null, false, null); perfInfo.registerResult(op.getResult()).registerSuccess(true); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index 585786491236e..02b143cd61bd3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -83,6 +83,7 @@ public final class ConfigurationKeys { * overwritten only if there is a match on the eTag of existing file. */ public static final String FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE = "fs.azure.enable.conditional.create.overwrite"; + public static final String FS_AZURE_ENABLE_MKDIR_OVERWRITE = "fs.azure.enable.mkdir.overwrite"; /** Provides a config to provide comma separated path prefixes on which Appendblob based files are created * Default is empty. **/ public static final String FS_AZURE_APPEND_BLOB_KEY = "fs.azure.appendblob.directories"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index 9b760c472a9ad..d90f525712af7 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -76,6 +76,7 @@ public final class FileSystemConfigurations { public static final String DEFAULT_FS_AZURE_ATOMIC_RENAME_DIRECTORIES = "/hbase"; public static final boolean DEFAULT_FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE = true; + public static final boolean DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE = true; public static final String DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES = ""; public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java index 79bba094f0e44..27ddcee695aaa 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java @@ -43,6 +43,7 @@ public final class HttpHeaderConfigurations { public static final String USER_AGENT = "User-Agent"; public static final String X_HTTP_METHOD_OVERRIDE = "X-HTTP-Method-Override"; public static final String X_MS_CLIENT_REQUEST_ID = "x-ms-client-request-id"; + public static final String X_MS_EXISTING_RESOURCE_TYPE = "x-ms-existing-resource-type"; public static final String X_MS_DATE = "x-ms-date"; public static final String X_MS_REQUEST_ID = "x-ms-request-id"; public static final String X_MS_VERSION = "x-ms-version"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index bfc11a676ae48..92b24f0dda212 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -302,7 +302,18 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + try { + op.execute(); + } catch (AzureBlobFileSystemException ex) { + if (!isFile && op.getResult().getStatusCode() == HttpURLConnection.HTTP_CONFLICT) { + String existingResource = + op.getResult().getResponseHeader(X_MS_EXISTING_RESOURCE_TYPE); + if (existingResource != null && existingResource.equals(DIRECTORY)) { + return op; //don't throw ex on mkdirs for existing directory + } + } + throw ex; + } return op; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index de476a6abce9b..0db9529326702 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -20,15 +20,19 @@ import java.util.UUID; +import org.junit.Assume; import org.junit.Test; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; - import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENABLE_MKDIR_OVERWRITE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Test mkdir operation. 
@@ -41,12 +45,44 @@ public ITestAzureBlobFileSystemMkDir() throws Exception { @Test public void testCreateDirWithExistingDir() throws Exception { + Assume.assumeTrue(DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getFileSystem() + .getIsNamespaceEnabled()); final AzureBlobFileSystem fs = getFileSystem(); Path path = new Path("testFolder"); assertMkdirs(fs, path); assertMkdirs(fs, path); } + @Test + public void testMkdirExistingDirOverwriteFalse() throws Exception { + Assume.assumeFalse("Ignore test until default overwrite is set to false", + DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE); + Assume.assumeTrue("Ignore test for Non-HNS accounts", + getFileSystem().getIsNamespaceEnabled()); + //execute test only for HNS account with default overwrite=false + Configuration config = new Configuration(this.getRawConfiguration()); + config.set(FS_AZURE_ENABLE_MKDIR_OVERWRITE, Boolean.toString(false)); + AzureBlobFileSystem fs = getFileSystem(config); + Path path = new Path("testFolder"); + assertMkdirs(fs, path); //checks that mkdirs returns true + long timeCreated = fs.getFileStatus(path).getModificationTime(); + assertMkdirs(fs, path); //call to existing dir should return success + assertEquals("LMT should not be updated for existing dir", timeCreated, + fs.getFileStatus(path).getModificationTime()); + } + + @Test + public void createDirWithExistingFilename() throws Exception { + Assume.assumeFalse("Ignore test until default overwrite is set to false", + DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getFileSystem() + .getIsNamespaceEnabled()); + final AzureBlobFileSystem fs = getFileSystem(); + Path path = new Path("testFilePath"); + fs.create(path); + assertTrue(fs.getFileStatus(path).isFile()); + intercept(FileAlreadyExistsException.class, () -> fs.mkdirs(path)); + } + @Test public void testCreateRoot() throws Exception { assertMkdirs(getFileSystem(), new Path("/")); From ed408b4a083115b55d68f0356170eb0e71106641 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Sun, 14 Mar 2021 17:48:18 +0800 Subject: [PATCH 097/666] HDFS-15816. Fix shouldAvoidStaleDataNodesForWrite returns when no stale node in cluster. Contributed by Yang Yun. 
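In short, shouldAvoidStaleDataNodesForWrite() used to return true even when no datanode was stale, and the new testReduceChooseTimesIfNOStaleNode case asserts that it now returns false in that situation. A simplified, standalone sketch of the changed predicate (the method and parameter names here are mine, not the exact DatanodeManager fields):

    // Only avoid stale datanodes on the write path when at least one stale
    // node is actually known and staleness stays below the configured ratio.
    static boolean shouldAvoidStaleForWrite(boolean avoidStaleEnabled,
        int numStaleNodes, int liveNodes, float ratioUseStaleForWrite) {
      return avoidStaleEnabled
          && numStaleNodes > 0
          && numStaleNodes <= liveNodes * ratioUseStaleForWrite;
    }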
--- .../hdfs/server/blockmanagement/DatanodeManager.java | 2 +- .../blockmanagement/TestReplicationPolicy.java | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 01dfe04cb137a..454e4846f38fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1321,7 +1321,7 @@ public List getEnteringMaintenanceNodes() { public boolean shouldAvoidStaleDataNodesForWrite() { // If # stale exceeds maximum staleness ratio, disable stale // datanode avoidance on the write path - return avoidStaleDataNodesForWrite && + return avoidStaleDataNodesForWrite && numStaleNodes > 0 && (numStaleNodes <= heartbeatManager.getLiveDatanodeCount() * ratioUseStaleDataNodesForWrite); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index c7b080c0fbebd..57f61b41fef2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -1664,4 +1664,16 @@ public void testChosenFailureForStorageType() { assertNotEquals(0, appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE")); } + + @Test + public void testReduceChooseTimesIfNOStaleNode() { + for(int i = 0; i < 6; i++) { + updateHeartbeatWithUsage(dataNodes[i], + 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, + (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, + 0L, 0L, 0L, 0, 0); + } + assertFalse(dnManager.shouldAvoidStaleDataNodesForWrite()); + resetHeartbeatForStorages(); + } } \ No newline at end of file From 22d1397dbd704ec9317e0f68abf9965f4276bc05 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Sun, 14 Mar 2021 18:09:50 +0800 Subject: [PATCH 098/666] HADOOP-17585. Correct timestamp format in the docs for the touch command. Contributed by Stephen O'Donnell. 
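The docs are being aligned to the yyyyMMdd:HHmmss pattern that the touch command itself advertises in its usage text below. A standalone sanity check of that format, not part of the patch; the class name is mine and the sample value simply reuses the date from the documentation example:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class TouchTimestampFormatCheck {
      public static void main(String[] args) throws Exception {
        // yyyyMMdd:HHmmss is the format the documentation is corrected to.
        SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd:HHmmss");
        Date d = fmt.parse("20180809:230000");
        System.out.println(d);  // August 9th 2018, 11 pm in the local zone
      }
    }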
--- .../java/org/apache/hadoop/fs/shell/TouchCommands.java | 10 ++++++---- .../hadoop-common/src/site/markdown/FileSystemShell.md | 8 ++++---- .../hadoop-common/src/test/resources/testConf.xml | 6 +++--- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java index b81f2f7b97575..872de306d287a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java @@ -102,8 +102,8 @@ public static class Touch extends TouchCommands { public static final String NAME = "touch"; public static final String USAGE = "[-" + OPTION_CHANGE_ONLY_ACCESS_TIME + "] [-" + OPTION_CHANGE_ONLY_MODIFICATION_TIME + "] [-" - + OPTION_USE_TIMESTAMP + " TIMESTAMP ] [-" + OPTION_DO_NOT_CREATE_FILE - + "] ..."; + + OPTION_USE_TIMESTAMP + " TIMESTAMP (yyyyMMdd:HHmmss) ] " + + "[-" + OPTION_DO_NOT_CREATE_FILE + "] ..."; public static final String DESCRIPTION = "Updates the access and modification times of the file specified by the" + " to the current time. If the file does not exist, then a zero" @@ -114,7 +114,8 @@ public static class Touch extends TouchCommands { + OPTION_CHANGE_ONLY_MODIFICATION_TIME + " Change only the modification time \n" + "-" + OPTION_USE_TIMESTAMP + " TIMESTAMP" - + " Use specified timestamp (in format yyyyMMddHHmmss) instead of current time \n" + + " Use specified timestamp instead of current time\n" + + " TIMESTAMP format yyyyMMdd:HHmmss\n" + "-" + OPTION_DO_NOT_CREATE_FILE + " Do not create any files"; private boolean changeModTime = false; @@ -183,7 +184,8 @@ private void updateTime(PathData item) throws IOException { time = dateFormat.parse(timestamp).getTime(); } catch (ParseException e) { throw new IllegalArgumentException( - "Unable to parse the specified timestamp " + timestamp, e); + "Unable to parse the specified timestamp "+ timestamp + + ". The expected format is " + dateFormat.toPattern(), e); } } if (changeModTime ^ changeAccessTime) { diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md index c5080dba29e47..c217cad82d2e1 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md @@ -760,7 +760,7 @@ timestamp of that URI. * Use -a option to change only the access time * Use -m option to change only the modification time -* Use -t option to specify timestamp (in format yyyyMMddHHmmss) instead of current time +* Use -t option to specify timestamp (in format yyyyMMdd:HHmmss) instead of current time * Use -c option to not create file if it does not exist The timestamp format is as follows @@ -770,13 +770,13 @@ The timestamp format is as follows * HH Two digit hour of the day using 24 hour notation (e.g. 23 stands for 11 pm, 11 stands for 11 am) * mm Two digit minutes of the hour * ss Two digit seconds of the minute -e.g. 20180809230000 represents August 9th 2018, 11pm +e.g. 
20180809:230000 represents August 9th 2018, 11pm Example: * `hadoop fs -touch pathname` -* `hadoop fs -touch -m -t 20180809230000 pathname` -* `hadoop fs -touch -t 20180809230000 pathname` +* `hadoop fs -touch -m -t 20180809:230000 pathname` +* `hadoop fs -touch -t 20180809:230000 pathname` * `hadoop fs -touch -a pathname` Exit Code: Returns 0 on success and -1 on error. diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml index cbc50b9d1c683..edf598b68df4f 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml @@ -819,7 +819,7 @@ RegexpComparator - ^-touch \[-a\] \[-m\] \[-t TIMESTAMP \] \[-c\] <path> \.\.\. :( )* + ^-touch \[-a\] \[-m\] \[-t TIMESTAMP \(yyyyMMdd\:HHmmss\) \] \[-c\] <path> \.\.\. :( )* RegexpComparator @@ -847,11 +847,11 @@ RegexpComparator - ^\s*-t\s+TIMESTAMP\s+Use specified timestamp \(in format yyyyMMddHHmmss\) instead of + ^\s*-t\s+TIMESTAMP\s+Use specified timestamp instead of current time( )* RegexpComparator - ^\s*current time( )* + ^\s*TIMESTAMP format yyyyMMdd\:HHmmss RegexpComparator From 01e0663f2c0e343ba6481ea1d2ad74ac48b546f7 Mon Sep 17 00:00:00 2001 From: sunlisheng Date: Mon, 15 Mar 2021 11:34:13 +0800 Subject: [PATCH 099/666] HDFS-15809. DeadNodeDetector does not remove live nodes from dead node set. Contributed by Jinglun. --- .../apache/hadoop/hdfs/DeadNodeDetector.java | 85 +++++++++++-------- .../hdfs/client/HdfsClientConfigKeys.java | 10 +-- .../src/main/resources/hdfs-default.xml | 20 ++--- .../hadoop/hdfs/TestDeadNodeDetection.java | 71 ++++++++++------ 4 files changed, 107 insertions(+), 79 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java index e17f2612675b7..cd46551f0225b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java @@ -29,9 +29,9 @@ import java.util.HashSet; import java.util.Map; -import java.util.Queue; import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; +import java.util.Deque; +import java.util.LinkedList; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -40,8 +40,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_DEFAULT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_DEAD_NODE_INTERVAL_MS_DEFAULT; @@ -54,9 +52,9 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_THREADS_KEY; import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_RPC_THREADS_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_RPC_THREADS_KEY; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_DEFAULT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_DEFAULT; /** * Detect the dead nodes in advance, and share this information among all the @@ -74,7 +72,7 @@ public class DeadNodeDetector extends Daemon { /** * Waiting time when DeadNodeDetector's state is idle. */ - private static final long IDLE_SLEEP_MS = 10000; + private final long idleSleepMs; /** * Client context name. @@ -113,16 +111,6 @@ public class DeadNodeDetector extends Daemon { */ private long suspectNodeDetectInterval = 0; - /** - * The max queue size of probing dead node. - */ - private int maxDeadNodesProbeQueueLen = 0; - - /** - * The max queue size of probing suspect node. - */ - private int maxSuspectNodesProbeQueueLen; - /** * Connection timeout for probing dead node in milliseconds. */ @@ -131,12 +119,12 @@ public class DeadNodeDetector extends Daemon { /** * The dead node probe queue. */ - private Queue deadNodesProbeQueue; + private UniqueQueue deadNodesProbeQueue; /** * The suspect node probe queue. */ - private Queue suspectNodesProbeQueue; + private UniqueQueue suspectNodesProbeQueue; /** * The thread pool of probing dead node. @@ -181,6 +169,32 @@ private enum State { INIT, CHECK_DEAD, IDLE, ERROR } + /** + * The thread safe unique queue. + */ + static class UniqueQueue { + private Deque queue = new LinkedList<>(); + private Set set = new HashSet<>(); + + synchronized boolean offer(T dn) { + if (set.add(dn)) { + queue.addLast(dn); + return true; + } + return false; + } + + synchronized T poll() { + T dn = queue.pollFirst(); + set.remove(dn); + return dn; + } + + synchronized int size() { + return set.size(); + } + } + /** * Disabled start probe suspect/dead thread for the testing. 
*/ @@ -203,20 +217,14 @@ public DeadNodeDetector(String name, Configuration conf) { DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_INTERVAL_MS_DEFAULT); socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT); - maxDeadNodesProbeQueueLen = - conf.getInt(DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY, - DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_DEFAULT); - maxSuspectNodesProbeQueueLen = - conf.getInt(DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY, - DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_DEFAULT); probeConnectionTimeoutMs = conf.getLong( DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY, DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_DEFAULT); + this.deadNodesProbeQueue = new UniqueQueue<>(); + this.suspectNodesProbeQueue = new UniqueQueue<>(); - this.deadNodesProbeQueue = - new ArrayBlockingQueue(maxDeadNodesProbeQueueLen); - this.suspectNodesProbeQueue = - new ArrayBlockingQueue(maxSuspectNodesProbeQueueLen); + idleSleepMs = conf.getLong(DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY, + DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_DEFAULT); int deadNodeDetectDeadThreads = conf.getInt(DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_DEAD_NODE_THREADS_KEY, @@ -447,8 +455,7 @@ private void checkDeadNodes() { for (DatanodeInfo datanodeInfo : datanodeInfos) { if (!deadNodesProbeQueue.offer(datanodeInfo)) { LOG.debug("Skip to add dead node {} to check " + - "since the probe queue is full.", datanodeInfo); - break; + "since the node is already in the probe queue.", datanodeInfo); } else { LOG.debug("Add dead node to check: {}.", datanodeInfo); } @@ -458,7 +465,7 @@ private void checkDeadNodes() { private void idle() { try { - Thread.sleep(IDLE_SLEEP_MS); + Thread.sleep(idleSleepMs); } catch (InterruptedException e) { LOG.debug("Got interrupted while DeadNodeDetector is idle.", e); Thread.currentThread().interrupt(); @@ -483,14 +490,24 @@ private void removeFromDead(DatanodeInfo datanodeInfo) { deadNodes.remove(datanodeInfo.getDatanodeUuid()); } - public Queue getDeadNodesProbeQueue() { + public UniqueQueue getDeadNodesProbeQueue() { return deadNodesProbeQueue; } - public Queue getSuspectNodesProbeQueue() { + public UniqueQueue getSuspectNodesProbeQueue() { return suspectNodesProbeQueue; } + @VisibleForTesting + void setSuspectQueue(UniqueQueue queue) { + this.suspectNodesProbeQueue = queue; + } + + @VisibleForTesting + void setDeadQueue(UniqueQueue queue) { + this.deadNodesProbeQueue = queue; + } + /** * Add datanode to suspectNodes and suspectAndDeadNodes. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java index c17ad0e861514..2a6a7a530edfd 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java @@ -164,13 +164,9 @@ public interface HdfsClientConfigKeys { "dfs.client.deadnode.detection.enabled"; boolean DFS_CLIENT_DEAD_NODE_DETECTION_ENABLED_DEFAULT = false; - String DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY = - "dfs.client.deadnode.detection.deadnode.queue.max"; - int DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_DEFAULT = 100; - - String DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY = - "dfs.client.deadnode.detection.suspectnode.queue.max"; - int DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_DEFAULT = 1000; + String DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY = + "dfs.client.deadnode.detection.idle.sleep.ms"; + long DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_DEFAULT = 10000; String DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY = "dfs.client.deadnode.detection.probe.connection.timeout.ms"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 94ff3ec71ec6a..c3080581de9e3 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3191,26 +3191,18 @@ - dfs.client.deadnode.detection.deadnode.queue.max - 100 - - The max queue size of probing dead node. - - - - - dfs.client.deadnode.detection.suspectnode.queue.max - 1000 + dfs.client.deadnode.detection.probe.deadnode.threads + 10 - The max queue size of probing suspect node. + The maximum number of threads to use for probing dead node. - dfs.client.deadnode.detection.probe.deadnode.threads - 10 + dfs.client.deadnode.detection.idle.sleep.ms + 10000 - The maximum number of threads to use for probing dead node. + The sleep time of DeadNodeDetector per iteration. 
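[Illustrative sketch, not part of the patch.] The hunks above replace the hard-coded IDLE_SLEEP_MS and the bounded ArrayBlockingQueue probe queues with a configurable sleep (dfs.client.deadnode.detection.idle.sleep.ms) and an unbounded, deduplicating UniqueQueue, so a datanode can only sit in a probe queue once. A minimal standalone sketch of that deduplication idea follows; the class and method names below are invented for the example and are not the Hadoop classes themselves.

// Sketch only: a minimal deduplicating FIFO queue, analogous in spirit to the
// UniqueQueue<T> introduced above.
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

public class DedupQueueSketch<T> {
  private final Deque<T> queue = new ArrayDeque<>();
  private final Set<T> set = new HashSet<>();

  public synchronized boolean offer(T item) {
    // A duplicate offer is rejected, so repeated read failures against the
    // same node do not enqueue it twice.
    if (set.add(item)) {
      queue.addLast(item);
      return true;
    }
    return false;
  }

  public synchronized T poll() {
    T item = queue.pollFirst();
    if (item != null) {
      set.remove(item);
    }
    return item;
  }

  public static void main(String[] args) {
    DedupQueueSketch<String> q = new DedupQueueSketch<>();
    System.out.println(q.offer("dn-1")); // true, first time
    System.out.println(q.offer("dn-1")); // false, duplicate is dropped
    System.out.println(q.poll());        // dn-1
  }
}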
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index 9134f36bc95dd..e8da918e1f7cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -30,19 +30,20 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import java.io.IOException; +import java.util.Collection; import java.util.Queue; import java.util.concurrent.LinkedBlockingQueue; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_ENABLED_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_DEAD_NODE_INTERVAL_MS_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_INTERVAL_MS_KEY; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertNotSame; @@ -73,6 +74,7 @@ public void setUp() { DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY, 1000); conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 0); + conf.setLong(DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY, 100); } @After @@ -247,42 +249,63 @@ public void testDeadNodeDetectionDeadNodeRecovery() throws Exception { } @Test - public void testDeadNodeDetectionMaxDeadNodesProbeQueue() throws Exception { - conf.setInt(DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY, 1); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); - cluster.waitActive(); - - FileSystem fs = cluster.getFileSystem(); - Path filePath = new Path("/testDeadNodeDetectionMaxDeadNodesProbeQueue"); - createFile(fs, filePath); - - // Remove three DNs, - cluster.stopDataNode(0); - cluster.stopDataNode(0); - cluster.stopDataNode(0); - - FSDataInputStream in = fs.open(filePath); - DFSInputStream din = (DFSInputStream) in.getWrappedStream(); - DFSClient dfsClient = din.getDFSClient(); + public void testDeadNodeDetectionDeadNodeProbe() throws Exception { + FileSystem fs = null; + FSDataInputStream in = null; + Path filePath = new Path("/" + GenericTestUtils.getMethodName()); try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + + fs = cluster.getFileSystem(); + createFile(fs, filePath); + + // Remove three DNs, + cluster.stopDataNode(0); + cluster.stopDataNode(0); + cluster.stopDataNode(0); + + in = fs.open(filePath); + DFSInputStream din = (DFSInputStream) in.getWrappedStream(); + DFSClient dfsClient = din.getDFSClient(); + DeadNodeDetector deadNodeDetector = + 
dfsClient.getClientContext().getDeadNodeDetector(); + // Spy suspect queue and dead queue. + DeadNodeDetector.UniqueQueue queue = + deadNodeDetector.getSuspectNodesProbeQueue(); + DeadNodeDetector.UniqueQueue suspectSpy = + Mockito.spy(queue); + deadNodeDetector.setSuspectQueue(suspectSpy); + queue = deadNodeDetector.getDeadNodesProbeQueue(); + DeadNodeDetector.UniqueQueue deadSpy = Mockito.spy(queue); + deadNodeDetector.setDeadQueue(deadSpy); + // Trigger dead node detection. try { in.read(); } catch (BlockMissingException e) { } Thread.sleep(1500); - Assert.assertTrue((dfsClient.getClientContext().getDeadNodeDetector() - .getDeadNodesProbeQueue().size() - + dfsClient.getDeadNodes(din).size()) <= 4); + Collection deadNodes = + dfsClient.getDeadNodeDetector().clearAndGetDetectedDeadNodes(); + assertEquals(3, deadNodes.size()); + for (DatanodeInfo dead : deadNodes) { + // Each node is suspected once then marked as dead. + Mockito.verify(suspectSpy, Mockito.times(1)).offer(dead); + // All the dead nodes should be scheduled and probed at least once. + Mockito.verify(deadSpy, Mockito.atLeastOnce()).offer(dead); + Mockito.verify(deadSpy, Mockito.atLeastOnce()).poll(); + } } finally { - in.close(); + if (in != null) { + in.close(); + } deleteFile(fs, filePath); } } @Test public void testDeadNodeDetectionSuspectNode() throws Exception { - conf.setInt(DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY, 1); DeadNodeDetector.setDisabledProbeThreadForTest(true); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); From 556172d76fa42c4e00e90a70e725a9d47bcb2237 Mon Sep 17 00:00:00 2001 From: Chao Sun Date: Sun, 14 Mar 2021 21:14:37 -0700 Subject: [PATCH 100/666] HADOOP-17532. Yarn Job execution get failed when LZ4 Compression Codec is used. Contributed Bhavik Patel. --- hadoop-tools/hadoop-kafka/pom.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml index a6f854a1ec272..d8c01eda23bee 100644 --- a/hadoop-tools/hadoop-kafka/pom.xml +++ b/hadoop-tools/hadoop-kafka/pom.xml @@ -97,6 +97,10 @@ org.xerial.snappy snappy-java + + net.jpountz.lz4 + lz4 + From e7f52546dfc01538294fdcfe0cc3cd5437dc4aec Mon Sep 17 00:00:00 2001 From: Eric Payne Date: Mon, 15 Mar 2021 19:09:40 +0000 Subject: [PATCH 101/666] YARN-10588. Percentage of queue and cluster is zero in WebUI . 
Contributed by Bilwa S T --- .../yarn/util/resource/DefaultResourceCalculator.java | 5 +++++ .../yarn/util/resource/DominantResourceCalculator.java | 1 + .../hadoop/yarn/util/resource/ResourceCalculator.java | 9 +++++++++ .../scheduler/SchedulerApplicationAttempt.java | 2 +- .../scheduler/common/fica/FiCaSchedulerApp.java | 2 +- 5 files changed, 17 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index 232cd49b03c49..a7b59806a0b4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -166,4 +166,9 @@ public Set getInsufficientResourceNames(Resource required, return ImmutableSet.of(); } } + + @Override + public boolean isAllInvalidDivisor(Resource r) { + return isInvalidDivisor(r); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 0a6df09de9905..db62a895e6e54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -388,6 +388,7 @@ public boolean isInvalidDivisor(Resource r) { return false; } + @Override public boolean isAllInvalidDivisor(Resource r) { boolean flag = true; for (ResourceInformation res : r.getResources()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java index 91cbdd7e7e9a2..05850137c747e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java @@ -225,6 +225,15 @@ public abstract float divide( */ public abstract boolean isInvalidDivisor(Resource r); + + /** + * Determine if all resources are zero. + * + * @param r resource + * @return true if all divisors are invalid (should not be used), false else + */ + public abstract boolean isAllInvalidDivisor(Resource r); + /** * Ratio of resource a to resource b. 
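[Illustrative sketch, not part of the patch.] The isAllInvalidDivisor method added above addresses YARN-10588: with the dominant resource calculator, isInvalidDivisor treats the cluster resource as unusable if any single resource type is zero, so queue and cluster usage percentages rendered as zero whenever one resource (for example an empty custom resource type) was zero. The new check only short-circuits when every resource is zero. A standalone sketch of the two predicates, using plain long arrays instead of the YARN Resource API:

// Sketch only: contrasts "any divisor is zero" with "all divisors are zero".
public class DivisorCheckSketch {

  static boolean anyZero(long[] values) {   // old isInvalidDivisor-style check
    for (long v : values) {
      if (v == 0L) {
        return true;
      }
    }
    return false;
  }

  static boolean allZero(long[] values) {   // new isAllInvalidDivisor-style check
    for (long v : values) {
      if (v != 0L) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    long[] cluster = {1024L, 8L, 0L};        // memory, vcores, one empty custom resource
    System.out.println(anyZero(cluster));    // true  -> old code skipped the percentage math
    System.out.println(allZero(cluster));    // false -> new code still computes usage
  }
}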
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index e9575b9d5dd31..649fabd796a9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -1138,7 +1138,7 @@ public ApplicationResourceUsageReport getResourceUsageReport() { .put(ResourceInformation.VCORES.getName(), 0L); float queueUsagePerc = 0.0f; float clusterUsagePerc = 0.0f; - if (!calc.isInvalidDivisor(cluster)) { + if (!calc.isAllInvalidDivisor(cluster)) { float queueCapacityPerc = queue.getQueueInfo(false, false) .getCapacity(); queueUsagePerc = calc.divide(cluster, usedResourceClone, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index af1f70a4b531c..6507259f10cfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -1113,7 +1113,7 @@ public ApplicationResourceUsageReport getResourceUsageReport() { ResourceCalculator calc = rmContext.getScheduler().getResourceCalculator(); float queueUsagePerc = 0.0f; - if (!calc.isInvalidDivisor(totalPartitionRes)) { + if (!calc.isAllInvalidDivisor(totalPartitionRes)) { Resource effCap = ((AbstractCSQueue) getQueue()) .getEffectiveCapacity(getAppAMNodePartitionName()); if (!effCap.equals(Resources.none())) { From 1cd170f2d3fcdb669cec9735aef8f52069d76316 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Mon, 15 Mar 2021 20:13:17 +0000 Subject: [PATCH 102/666] MAPREDUCE-7322. revisiting TestMRIntermediateDataEncryption. Contributed by Ahmed Hussein. 
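[Illustrative sketch, not part of the patch.] The MAPREDUCE-7322 change below routes every intermediate spill through IntermediateEncryptedStream, which reports spill paths to a pluggable SpillCallBackInjector: production code keeps a no-op singleton, while tests swap in a recording implementation (SpillCallBackPathsFinder) and restore the previous hook afterwards. A minimal sketch of that swap-a-singleton-hook pattern follows; the names in the sketch are invented for the example.

// Sketch only: a no-op hook that tests can temporarily replace and then restore.
public class SpillHookSketch {

  static class Hook {
    private static Hook instance = new Hook();
    static Hook get() { return instance; }
    static Hook getAndSet(Hook next) {   // returns the previous hook so callers can restore it
      Hook prev = instance;
      instance = next;
      return prev;
    }
    void onSpill(String path) { /* production: do nothing */ }
  }

  static class RecordingHook extends Hook {
    final java.util.List<String> paths = new java.util.ArrayList<>();
    @Override void onSpill(String path) { paths.add(path); }
  }

  public static void main(String[] args) {
    RecordingHook recorder = new RecordingHook();
    Hook previous = Hook.getAndSet(recorder);   // install the test hook
    try {
      Hook.get().onSpill("/tmp/spill0.out");    // framework code calls the hook
    } finally {
      Hook.getAndSet(previous);                 // restore the no-op hook
    }
    System.out.println(recorder.paths);         // [/tmp/spill0.out]
  }
}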
--- .../org/apache/hadoop/util/JarFinder.java | 5 +- .../hadoop/mapreduce/v2/app/TestRecovery.java | 23 +- .../org/apache/hadoop/mapred/BackupStore.java | 5 +- .../org/apache/hadoop/mapred/MapTask.java | 34 +- .../java/org/apache/hadoop/mapred/Merger.java | 6 +- .../security/IntermediateEncryptedStream.java | 89 ++++ .../security/SpillCallBackInjector.java | 86 ++++ .../security/SpillCallBackPathsFinder.java | 193 ++++++++ .../mapreduce/security/package-info.java | 25 ++ .../hadoop/mapreduce/task/reduce/Fetcher.java | 5 +- .../mapreduce/task/reduce/LocalFetcher.java | 6 +- .../task/reduce/MergeManagerImpl.java | 14 +- .../task/reduce/OnDiskMapOutput.java | 5 +- .../hadoop/mapreduce/util/MRJobConfUtil.java | 58 +++ .../mapreduce/task/reduce/TestMerger.java | 77 ++-- .../hadoop/mapred/TestLocalJobSubmission.java | 112 +++-- .../TestMRIntermediateDataEncryption.java | 327 -------------- .../mapred/TestMROpportunisticMaps.java | 5 +- .../org/apache/hadoop/mapred/TestMerge.java | 6 +- .../hadoop/mapreduce/RandomTextWriter.java | 32 +- .../TestMRIntermediateDataEncryption.java | 411 ++++++++++++++++++ 21 files changed, 1079 insertions(+), 445 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java delete mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java index 3f1bb2d61dc92..85d95738b5ef5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java @@ -165,10 +165,11 @@ else if ("file".equals(url.getProtocol())) { if (!testDir.exists()) { testDir.mkdirs(); } - File tempJar = File.createTempFile("hadoop-", "", testDir); - tempJar = new File(tempJar.getAbsolutePath() + ".jar"); + File tempFile = File.createTempFile("hadoop-", "", testDir); + File tempJar = new File(tempFile.getAbsolutePath() + ".jar"); createJar(baseDir, tempJar); tempJar.deleteOnExit(); + tempFile.deleteOnExit(); return tempJar.getAbsolutePath(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index 00c9b3aeefc4e..5a23b58875a0b 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -40,6 +40,7 @@ import java.util.concurrent.TimeoutException; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.junit.Assert; @@ -105,6 +106,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; + +import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.slf4j.Logger; @@ -114,15 +117,24 @@ public class TestRecovery { private static final Logger LOG = LoggerFactory.getLogger(TestRecovery.class); - private static Path outputDir = new Path(new File("target", - TestRecovery.class.getName()).getAbsolutePath() + - Path.SEPARATOR + "out"); + + private static File testRootDir; + private static Path outputDir; private static String partFile = "part-r-00000"; private Text key1 = new Text("key1"); private Text key2 = new Text("key2"); private Text val1 = new Text("val1"); private Text val2 = new Text("val2"); + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir( + TestRecovery.class); + outputDir = new Path(testRootDir.getAbsolutePath(), "out"); + } + /** * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt * completely disappears because of failed launch, one attempt gets killed and @@ -600,14 +612,13 @@ public void testRecoveryWithSpillEncryption() throws Exception { MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount) { }; - Configuration conf = new Configuration(); + Configuration conf = + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true); conf.setBoolean("mapred.mapper.new-api", true); conf.setBoolean("mapred.reducer.new-api", true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - // run the MR job at the first attempt Job jobAttempt1 = app.submit(conf); app.waitForState(jobAttempt1, JobState.RUNNING); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java index 94ad9e0187ef2..5bd26883af0fa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java @@ -42,7 +42,8 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -576,7 +577,7 @@ private Writer createSpillFile() throws IOException { file = 
lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(), -1, conf); FSDataOutputStream out = fs.create(file); - out = CryptoUtils.wrapIfNecessary(conf, out); + out = IntermediateEncryptedStream.wrapIfNecessary(conf, out, tmp); return new Writer(conf, out, null, null, null, null, true); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 17461b196b37e..fa4396d77f458 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -63,6 +63,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex; import org.apache.hadoop.mapreduce.task.MapContextImpl; import org.apache.hadoop.mapreduce.CryptoUtils; @@ -1630,7 +1631,9 @@ private void sortAndSpill() throws IOException, ClassNotFoundException, IFile.Writer writer = null; try { long segmentStart = out.getPos(); - partitionOut = CryptoUtils.wrapIfNecessary(job, out, false); + partitionOut = + IntermediateEncryptedStream.wrapIfNecessary(job, out, false, + filename); writer = new Writer(job, partitionOut, keyClass, valClass, codec, spilledRecordsCounter); if (combinerRunner == null) { @@ -1687,6 +1690,7 @@ private void sortAndSpill() throws IOException, ClassNotFoundException, Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH); + IntermediateEncryptedStream.addSpillIndexFile(indexFilename, job); spillRec.writeToFile(indexFilename, job); } else { indexCacheList.add(spillRec); @@ -1727,7 +1731,9 @@ private void spillSingleRecord(final K key, final V value, try { long segmentStart = out.getPos(); // Create a new codec, don't care! 
- partitionOut = CryptoUtils.wrapIfNecessary(job, out, false); + partitionOut = + IntermediateEncryptedStream.wrapIfNecessary(job, out, false, + filename); writer = new IFile.Writer(job, partitionOut, keyClass, valClass, codec, spilledRecordsCounter); @@ -1761,6 +1767,7 @@ private void spillSingleRecord(final K key, final V value, Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH); + IntermediateEncryptedStream.addSpillIndexFile(indexFilename, job); spillRec.writeToFile(indexFilename, job); } else { indexCacheList.add(spillRec); @@ -1854,15 +1861,19 @@ private void mergeParts() throws IOException, InterruptedException, finalOutFileSize += rfs.getFileStatus(filename[i]).getLen(); } if (numSpills == 1) { //the spill is the final output + Path indexFileOutput = + mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]); sameVolRename(filename[0], mapOutputFile.getOutputFileForWriteInVolume(filename[0])); if (indexCacheList.size() == 0) { - sameVolRename(mapOutputFile.getSpillIndexFile(0), - mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0])); + Path indexFilePath = mapOutputFile.getSpillIndexFile(0); + IntermediateEncryptedStream.validateSpillIndexFile( + indexFilePath, job); + sameVolRename(indexFilePath, indexFileOutput); } else { - indexCacheList.get(0).writeToFile( - mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job); + indexCacheList.get(0).writeToFile(indexFileOutput, job); } + IntermediateEncryptedStream.addSpillIndexFile(indexFileOutput, job); sortPhase.complete(); return; } @@ -1870,6 +1881,7 @@ private void mergeParts() throws IOException, InterruptedException, // read in paged indices for (int i = indexCacheList.size(); i < numSpills; ++i) { Path indexFileName = mapOutputFile.getSpillIndexFile(i); + IntermediateEncryptedStream.validateSpillIndexFile(indexFileName, job); indexCacheList.add(new SpillRecord(indexFileName, job)); } @@ -1881,7 +1893,7 @@ private void mergeParts() throws IOException, InterruptedException, mapOutputFile.getOutputFileForWrite(finalOutFileSize); Path finalIndexFile = mapOutputFile.getOutputIndexFileForWrite(finalIndexFileSize); - + IntermediateEncryptedStream.addSpillIndexFile(finalIndexFile, job); //The output stream for the final single output file FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096); FSDataOutputStream finalPartitionOut = null; @@ -1893,8 +1905,9 @@ private void mergeParts() throws IOException, InterruptedException, try { for (int i = 0; i < partitions; i++) { long segmentStart = finalOut.getPos(); - finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut, - false); + finalPartitionOut = + IntermediateEncryptedStream.wrapIfNecessary(job, finalOut, + false, finalOutputFile); Writer writer = new Writer(job, finalPartitionOut, keyClass, valClass, codec, null); writer.close(); @@ -1957,7 +1970,8 @@ private void mergeParts() throws IOException, InterruptedException, //write merged output to disk long segmentStart = finalOut.getPos(); - finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut, false); + finalPartitionOut = IntermediateEncryptedStream.wrapIfNecessary(job, + finalOut, false, finalOutputFile); Writer writer = new Writer(job, finalPartitionOut, keyClass, valClass, codec, spilledRecordsCounter); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java index 16f88370bb6e3..d783752cf717d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.util.PriorityQueue; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; @@ -302,7 +303,7 @@ void init(Counters.Counter readsCounter) throws IOException { FSDataInputStream in = fs.open(file); in.seek(segmentOffset); - in = CryptoUtils.wrapIfNecessary(conf, in); + in = IntermediateEncryptedStream.wrapIfNecessary(conf, in, file); reader = new Reader(conf, in, segmentLength - CryptoUtils.cryptoPadding(conf), codec, readsCounter); @@ -730,7 +731,8 @@ RawKeyValueIterator merge(Class keyClass, Class valueClass, approxOutputSize, conf); FSDataOutputStream out = fs.create(outputFile); - out = CryptoUtils.wrapIfNecessary(conf, out); + out = IntermediateEncryptedStream.wrapIfNecessary(conf, out, + outputFile); Writer writer = new Writer(conf, out, keyClass, valueClass, codec, writesCounter, true); writeFile(this, writer, reporter, conf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java new file mode 100644 index 0000000000000..eb14a208c9961 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.security; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.CryptoUtils; + +/** + * Used to wrap helpers while spilling intermediate files. 
+ * Setting the {@link SpillCallBackInjector} helps in: + * 1- adding callbacks to capture the path of the spilled files. + * 2- Verifying the encryption when intermediate encryption is enabled. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class IntermediateEncryptedStream { + + private static SpillCallBackInjector prevSpillCBInjector = null; + + public static FSDataOutputStream wrapIfNecessary(Configuration conf, + FSDataOutputStream out, Path outPath) throws IOException { + SpillCallBackInjector.get().writeSpillFileCB(outPath, out, conf); + return CryptoUtils.wrapIfNecessary(conf, out, true); + } + + public static FSDataOutputStream wrapIfNecessary(Configuration conf, + FSDataOutputStream out, boolean closeOutputStream, + Path outPath) throws IOException { + SpillCallBackInjector.get().writeSpillFileCB(outPath, out, conf); + return CryptoUtils.wrapIfNecessary(conf, out, closeOutputStream); + } + + public static FSDataInputStream wrapIfNecessary(Configuration conf, + FSDataInputStream in, Path inputPath) throws IOException { + SpillCallBackInjector.get().getSpillFileCB(inputPath, in, conf); + return CryptoUtils.wrapIfNecessary(conf, in); + } + + public static InputStream wrapIfNecessary(Configuration conf, + InputStream in, long length, Path inputPath) throws IOException { + SpillCallBackInjector.get().getSpillFileCB(inputPath, in, conf); + return CryptoUtils.wrapIfNecessary(conf, in, length); + } + + public static void addSpillIndexFile(Path indexFilename, Configuration conf) { + SpillCallBackInjector.get().addSpillIndexFileCB(indexFilename, conf); + } + + public static void validateSpillIndexFile(Path indexFilename, + Configuration conf) { + SpillCallBackInjector.get().validateSpillIndexFileCB(indexFilename, conf); + } + + public static SpillCallBackInjector resetSpillCBInjector() { + return setSpillCBInjector(prevSpillCBInjector); + } + + public synchronized static SpillCallBackInjector setSpillCBInjector( + SpillCallBackInjector spillInjector) { + prevSpillCBInjector = + SpillCallBackInjector.getAndSet(spillInjector); + return spillInjector; + } + + private IntermediateEncryptedStream() {} +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java new file mode 100644 index 0000000000000..9b23c518f1a3b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
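[Illustrative sketch, not part of the patch.] Given the IntermediateEncryptedStream class above, spill writers are expected to call its wrapIfNecessary overloads instead of CryptoUtils directly, so the active injector observes the spill path before the stream is optionally encrypted. A hedged sketch of what such a caller looks like, modeled on the BackupStore and MapTask hunks in this patch; error handling and task plumbing are omitted, and "conf"/"spillPath" are assumed to be supplied by the caller.

// Sketch of a spill-writer call site, modeled on the hunks in this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream;

public class SpillWriterSketch {
  public static FSDataOutputStream openSpillStream(Configuration conf, Path spillPath)
      throws java.io.IOException {
    FileSystem localFs = FileSystem.getLocal(conf).getRaw();
    FSDataOutputStream raw = localFs.create(spillPath);
    // Notifies the registered spill callback with the path, then wraps the
    // stream for encryption only when encrypted intermediate data is enabled.
    return IntermediateEncryptedStream.wrapIfNecessary(conf, raw, spillPath);
  }
}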
+ */ +package org.apache.hadoop.mapreduce.security; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +/** + * Used for injecting callbacks while spilling files. + * Calls into this are a no-op in production code. + */ +@VisibleForTesting +@InterfaceAudience.Private +public class SpillCallBackInjector { + private static SpillCallBackInjector instance = new SpillCallBackInjector(); + public static SpillCallBackInjector get() { + return instance; + } + /** + * Sets the global SpillFilesCBInjector to the new value, returning the old + * value. + * + * @param spillInjector the new implementation for the spill injector. + * @return the previous implementation. + */ + public static SpillCallBackInjector getAndSet( + SpillCallBackInjector spillInjector) { + SpillCallBackInjector prev = instance; + instance = spillInjector; + return prev; + } + + public void writeSpillIndexFileCB(Path path) { + // do nothing + } + + public void writeSpillFileCB(Path path, FSDataOutputStream out, + Configuration conf) { + // do nothing + } + + public void getSpillFileCB(Path path, InputStream is, Configuration conf) { + // do nothing + } + + public String getSpilledFileReport() { + return null; + } + + public void handleErrorInSpillFill(Path path, Exception e) { + // do nothing + } + + public void corruptSpilledFile(Path fileName) throws IOException { + // do nothing + } + + public void addSpillIndexFileCB(Path path, Configuration conf) { + // do nothing + } + + public void validateSpillIndexFileCB(Path path, Configuration conf) { + // do nothing + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java new file mode 100644 index 0000000000000..7be99e556e597 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.security; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoStreamUtils; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.CryptoUtils; + +/** + * An implementation class that keeps track of the spilled files. + */ +public class SpillCallBackPathsFinder extends SpillCallBackInjector { + private static final Logger LOG = + LoggerFactory.getLogger(SpillCallBackPathsFinder.class); + /** + * Encrypted spilled files. + */ + private final Map> encryptedSpillFiles = + Collections.synchronizedMap(new ConcurrentHashMap<>()); + /** + * Non-Encrypted spilled files. + */ + private final Map> spillFiles = + Collections.synchronizedMap(new ConcurrentHashMap<>()); + /** + * Invalid position access. + */ + private final Map> invalidAccessMap = + Collections.synchronizedMap(new ConcurrentHashMap<>()); + /** + * Index spill files. + */ + private final Set indexSpillFiles = ConcurrentHashMap.newKeySet(); + /** + * Paths that were not found in the maps. + */ + private final Set negativeCache = ConcurrentHashMap.newKeySet(); + + protected Map> getFilesMap(Configuration config) { + if (CryptoUtils.isEncryptedSpillEnabled(config)) { + return encryptedSpillFiles; + } + return spillFiles; + } + + @Override + public void writeSpillFileCB(Path path, FSDataOutputStream out, + Configuration conf) { + long outPos = out.getPos(); + getFilesMap(conf) + .computeIfAbsent(path, p -> ConcurrentHashMap.newKeySet()) + .add(outPos); + LOG.debug("writeSpillFileCB.. path:{}; pos:{}", path, outPos); + } + + @Override + public void getSpillFileCB(Path path, InputStream is, Configuration conf) { + if (path == null) { + return; + } + Set pathEntries = getFilesMap(conf).get(path); + if (pathEntries != null) { + try { + long isPos = CryptoStreamUtils.getInputStreamOffset(is); + if (pathEntries.contains(isPos)) { + LOG.debug("getSpillFileCB... Path {}; Pos: {}", path, isPos); + return; + } + invalidAccessMap + .computeIfAbsent(path, p -> ConcurrentHashMap.newKeySet()) + .add(isPos); + LOG.debug("getSpillFileCB... access incorrect position.. " + + "Path {}; Pos: {}", path, isPos); + } catch (IOException e) { + LOG.error("Could not get inputStream position.. Path {}", path, e); + // do nothing + } + return; + } + negativeCache.add(path); + LOG.warn("getSpillFileCB.. Could not find spilled file .. 
Path: {}", path); + } + + @Override + public String getSpilledFileReport() { + StringBuilder strBuilder = + new StringBuilder("\n++++++++ Spill Report ++++++++") + .append(dumpMapEntries("Encrypted Spilled Files", + encryptedSpillFiles)) + .append(dumpMapEntries("Non-Encrypted Spilled Files", + spillFiles)) + .append(dumpMapEntries("Invalid Spill Access", + invalidAccessMap)) + .append("\n ----- Spilled Index Files ----- ") + .append(indexSpillFiles.size()); + for (Path p : indexSpillFiles) { + strBuilder.append("\n\t index-path: ").append(p.toString()); + } + strBuilder.append("\n ----- Negative Cache files ----- ") + .append(negativeCache.size()); + for (Path p : negativeCache) { + strBuilder.append("\n\t path: ").append(p.toString()); + } + return strBuilder.toString(); + } + + @Override + public void addSpillIndexFileCB(Path path, Configuration conf) { + if (path == null) { + return; + } + indexSpillFiles.add(path); + LOG.debug("addSpillIndexFileCB... Path: {}", path); + } + + @Override + public void validateSpillIndexFileCB(Path path, Configuration conf) { + if (path == null) { + return; + } + if (indexSpillFiles.contains(path)) { + LOG.debug("validateSpillIndexFileCB.. Path: {}", path); + return; + } + LOG.warn("validateSpillIndexFileCB.. could not retrieve indexFile.. " + + "Path: {}", path); + negativeCache.add(path); + } + + public Set getEncryptedSpilledFiles() { + return Collections.unmodifiableSet(encryptedSpillFiles.keySet()); + } + + /** + * Gets the set of path:pos of the entries that were accessed incorrectly. + * @return a set of string in the format of {@literal Path[Pos]} + */ + public Set getInvalidSpillEntries() { + Set result = new LinkedHashSet<>(); + for (Entry> spillMapEntry: invalidAccessMap.entrySet()) { + for (Long singleEntry : spillMapEntry.getValue()) { + result.add(String.format("%s[%d]", + spillMapEntry.getKey(), singleEntry)); + } + } + return result; + } + + private String dumpMapEntries(String label, + Map> entriesMap) { + StringBuilder strBuilder = + new StringBuilder(String.format("%n ----- %s ----- %d", label, + entriesMap.size())); + for (Entry> encryptedSpillEntry + : entriesMap.entrySet()) { + strBuilder.append(String.format("%n\t\tpath: %s", + encryptedSpillEntry.getKey())); + for (Long singlePos : encryptedSpillEntry.getValue()) { + strBuilder.append(String.format("%n\t\t\tentry: %d", singlePos)); + } + } + return strBuilder.toString(); + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java new file mode 100644 index 0000000000000..451e6f6550381 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +/** + * Helper classes for the shuffle/spill encryptions. + */ +package org.apache.hadoop.mapreduce.security; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 6e29fe47d88fc..d8bc68c6c71d5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.security.SecureShuffleUtils; import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; @@ -512,7 +513,9 @@ private TaskAttemptID[] copyMapOutput(MapHost host, } InputStream is = input; - is = CryptoUtils.wrapIfNecessary(jobConf, is, compressedLength); + is = + IntermediateEncryptedStream.wrapIfNecessary(jobConf, is, + compressedLength, null); compressedLength -= CryptoUtils.cryptoPadding(jobConf); decompressedLength -= CryptoUtils.cryptoPadding(jobConf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index 90160cfa07c27..3ae1e746fcb9f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -36,6 +36,8 @@ import org.apache.hadoop.mapred.SpillRecord; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -151,7 +153,9 @@ private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException { FileSystem localFs = FileSystem.getLocal(job).getRaw(); FSDataInputStream inStream = localFs.open(mapOutputFileName); try { - inStream = CryptoUtils.wrapIfNecessary(job, inStream); + inStream = + IntermediateEncryptedStream.wrapIfNecessary(job, inStream, + mapOutputFileName); inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job)); 
mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java index ae44ba4a91d08..29724de0eb923 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java @@ -53,7 +53,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.ReflectionUtils; @@ -468,7 +468,9 @@ public void merge(List> inputs) throws IOException { mergeOutputSize).suffix( Task.MERGED_OUTPUT_PREFIX); - FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath)); + FSDataOutputStream out = + IntermediateEncryptedStream.wrapIfNecessary(jobConf, + rfs.create(outputPath), outputPath); Writer writer = new Writer(jobConf, out, (Class) jobConf.getMapOutputKeyClass(), (Class) jobConf.getMapOutputValueClass(), codec, null, true); @@ -552,7 +554,9 @@ public void merge(List inputs) throws IOException { localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(), approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX); - FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath)); + FSDataOutputStream out = + IntermediateEncryptedStream.wrapIfNecessary(jobConf, + rfs.create(outputPath), outputPath); Writer writer = new Writer(jobConf, out, (Class) jobConf.getMapOutputKeyClass(), (Class) jobConf.getMapOutputValueClass(), codec, null, true); @@ -735,7 +739,9 @@ private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs, tmpDir, comparator, reporter, spilledRecordsCounter, null, mergePhase); - FSDataOutputStream out = CryptoUtils.wrapIfNecessary(job, fs.create(outputPath)); + FSDataOutputStream out = + IntermediateEncryptedStream.wrapIfNecessary(job, + fs.create(outputPath), outputPath); Writer writer = new Writer(job, out, keyClass, valueClass, codec, null, true); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java index 5f96a030988cf..54a9522e2ccfe 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java @@ -37,7 +37,7 @@ import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.CryptoUtils; 
+import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @@ -84,7 +84,8 @@ public OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId, this.fs = fs; this.outputPath = outputPath; tmpOutputPath = getTempPath(outputPath, fetcher); - disk = CryptoUtils.wrapIfNecessary(conf, fs.create(tmpOutputPath)); + disk = IntermediateEncryptedStream.wrapIfNecessary(conf, + fs.create(tmpOutputPath), tmpOutputPath); } @VisibleForTesting diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java index 4e4e78e1e3cb1..4319e17416831 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java @@ -17,14 +17,22 @@ */ package org.apache.hadoop.mapreduce.util; +import java.io.File; import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.MRJobConfig; /** * A class that contains utility methods for MR Job configuration. */ public final class MRJobConfUtil { + private static final Logger LOG = + LoggerFactory.getLogger(MRJobConfUtil.class); public static final String REDACTION_REPLACEMENT_VAL = "*********(redacted)"; /** @@ -130,4 +138,54 @@ public static long getTaskProgressWaitDeltaTimeThreshold() { public static double convertTaskProgressToFactor(final float progress) { return Math.floor(progress * MRJobConfUtil.PROGRESS_MIN_DELTA_FACTOR); } + + /** + * For unit tests, use urandom to avoid the YarnChild process from hanging + * on low entropy systems. + */ + private static final String TEST_JVM_SECURITY_EGD_OPT = + "-Djava.security.egd=file:/dev/./urandom"; + + public static Configuration initEncryptedIntermediateConfigsForTesting( + Configuration conf) { + Configuration config = + (conf == null) ? new Configuration(): conf; + final String childJVMOpts = + TEST_JVM_SECURITY_EGD_OPT.concat(" ") + .concat(config.get("mapred.child.java.opts", " ")); + // Set the jvm arguments. + config.set("yarn.app.mapreduce.am.admin-command-opts", + TEST_JVM_SECURITY_EGD_OPT); + config.set("mapred.child.java.opts", childJVMOpts); + config.setBoolean("mapreduce.job.encrypted-intermediate-data", true); + return config; + } + + /** + * Set local directories so that the generated folders is subdirectory of the + * test directories. + * @param conf + * @param testRootDir + * @return + */ + public static Configuration setLocalDirectoriesConfigForTesting( + Configuration conf, File testRootDir) { + Configuration config = + (conf == null) ? 
new Configuration(): conf; + final File hadoopLocalDir = new File(testRootDir, "hadoop-dir"); + // create the directory + if (!hadoopLocalDir.getAbsoluteFile().mkdirs()) { + LOG.info("{} directory already exists", hadoopLocalDir.getPath()); + } + Path mapredHadoopTempDir = new Path(hadoopLocalDir.getPath()); + Path mapredSystemDir = new Path(mapredHadoopTempDir, "system"); + Path stagingDir = new Path(mapredHadoopTempDir, "tmp/staging"); + // Set the temp directories a subdir of the test directory. + config.set("mapreduce.jobtracker.staging.root.dir", stagingDir.toString()); + config.set("mapreduce.jobtracker.system.dir", mapredSystemDir.toString()); + config.set("mapreduce.cluster.temp.dir", mapredHadoopTempDir.toString()); + config.set("mapreduce.cluster.local.dir", + new Path(mapredHadoopTempDir, "local").toString()); + return config; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java index 13cb6b32214fb..bd90941fa13cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java @@ -24,6 +24,7 @@ import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -50,43 +51,60 @@ import org.apache.hadoop.mapred.Merger.Segment; import org.apache.hadoop.mapred.RawKeyValueIterator; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; import org.junit.Assert; import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; public class TestMerger { - - private Configuration conf; + private static File testRootDir; + @Rule + public TestName unitTestName = new TestName(); + private File unitTestDir; private JobConf jobConf; private FileSystem fs; + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir( + TestMerger.class); + } + @Before public void setup() throws IOException { - conf = new Configuration(); + unitTestDir = new File(testRootDir, unitTestName.getMethodName()); + unitTestDir.mkdirs(); jobConf = new 
JobConf(); - fs = FileSystem.getLocal(conf); + // Set the temp directories a subdir of the test directory. + MRJobConfUtil.setLocalDirectoriesConfigForTesting(jobConf, unitTestDir); + jobConf.set(MRConfig.FRAMEWORK_NAME, "local"); + fs = FileSystem.getLocal(jobConf); } - @Test public void testEncryptedMerger() throws Throwable { - jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + // Enable intermediate encryption. + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(jobConf); Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); TokenCache.setEncryptedSpillKey(new byte[16], credentials); UserGroupInformation.getCurrentUser().addCredentials(credentials); @@ -106,8 +124,8 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { LocalDirAllocator lda = new LocalDirAllocator(MRConfig.LOCAL_DIR); MergeManagerImpl mergeManager = new MergeManagerImpl( - reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, - null, null, new Progress(), new MROutputFiles()); + reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, + null, null, null, new Progress(), new MROutputFiles()); // write map outputs Map map1 = new TreeMap(); @@ -115,12 +133,12 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { map1.put("carrot", "delicious"); Map map2 = new TreeMap(); map1.put("banana", "pretty good"); - byte[] mapOutputBytes1 = writeMapOutput(conf, map1); - byte[] mapOutputBytes2 = writeMapOutput(conf, map2); + byte[] mapOutputBytes1 = writeMapOutput(jobConf, map1); + byte[] mapOutputBytes2 = writeMapOutput(jobConf, map2); InMemoryMapOutput mapOutput1 = new InMemoryMapOutput( - conf, mapId1, mergeManager, mapOutputBytes1.length, null, true); + jobConf, mapId1, mergeManager, mapOutputBytes1.length, null, true); InMemoryMapOutput mapOutput2 = new InMemoryMapOutput( - conf, mapId2, mergeManager, mapOutputBytes2.length, null, true); + jobConf, mapId2, mergeManager, mapOutputBytes2.length, null, true); System.arraycopy(mapOutputBytes1, 0, mapOutput1.getMemory(), 0, mapOutputBytes1.length); System.arraycopy(mapOutputBytes2, 0, mapOutput2.getMemory(), 0, @@ -150,12 +168,12 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { map3.put("carrot", "amazing"); Map map4 = new TreeMap(); map4.put("banana", "bla"); - byte[] mapOutputBytes3 = writeMapOutput(conf, map3); - byte[] mapOutputBytes4 = writeMapOutput(conf, map4); + byte[] mapOutputBytes3 = writeMapOutput(jobConf, map3); + byte[] mapOutputBytes4 = writeMapOutput(jobConf, map4); InMemoryMapOutput mapOutput3 = new InMemoryMapOutput( - conf, mapId3, mergeManager, mapOutputBytes3.length, null, true); + jobConf, mapId3, mergeManager, mapOutputBytes3.length, null, true); InMemoryMapOutput mapOutput4 = new InMemoryMapOutput( - conf, mapId4, mergeManager, mapOutputBytes4.length, null, true); + jobConf, mapId4, mergeManager, mapOutputBytes4.length, null, true); System.arraycopy(mapOutputBytes3, 0, mapOutput3.getMemory(), 0, mapOutputBytes3.length); System.arraycopy(mapOutputBytes4, 0, mapOutput4.getMemory(), 0, @@ -174,12 +192,13 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { Assert.assertEquals(2, mergeManager.onDiskMapOutputs.size()); List paths = new ArrayList(); - Iterator iterator = mergeManager.onDiskMapOutputs.iterator(); + Iterator iterator = + mergeManager.onDiskMapOutputs.iterator(); List keys = new ArrayList(); List values = new ArrayList(); while (iterator.hasNext()) { 
CompressAwarePath next = iterator.next(); - readOnDiskMapOutput(conf, fs, next, keys, values); + readOnDiskMapOutput(jobConf, fs, next, keys, values); paths.add(next); } assertThat(keys).isEqualTo(Arrays.asList("apple", "banana", "carrot", @@ -189,8 +208,8 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { mergeManager.close(); mergeManager = new MergeManagerImpl( - reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, - null, null, new Progress(), new MROutputFiles()); + reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, + null, null, null, new Progress(), new MROutputFiles()); MergeThread onDiskMerger = mergeManager.createOnDiskMerger(); onDiskMerger.merge(paths); @@ -199,7 +218,8 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { keys = new ArrayList(); values = new ArrayList(); - readOnDiskMapOutput(conf, fs, mergeManager.onDiskMapOutputs.iterator().next(), keys, values); + readOnDiskMapOutput(jobConf, fs, + mergeManager.onDiskMapOutputs.iterator().next(), keys, values); assertThat(keys).isEqualTo(Arrays.asList("apple", "apple", "banana", "banana", "carrot", "carrot")); assertThat(values).isEqualTo(Arrays.asList("awesome", "disgusting", @@ -227,7 +247,8 @@ private byte[] writeMapOutput(Configuration conf, Map keysToValu private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path, List keys, List values) throws IOException { - FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path)); + FSDataInputStream in = + IntermediateEncryptedStream.wrapIfNecessary(conf, fs.open(path), path); IFile.Reader reader = new IFile.Reader(conf, in, fs.getFileStatus(path).getLen(), null, null); @@ -257,14 +278,16 @@ public void testUncompressed() throws IOException { @SuppressWarnings( { "unchecked" }) public void testMergeShouldReturnProperProgress( List> segments) throws IOException { - Path tmpDir = new Path("localpath"); + + Path tmpDir = new Path(jobConf.get("mapreduce.cluster.temp.dir"), + "localpath"); Class keyClass = (Class) jobConf.getMapOutputKeyClass(); Class valueClass = (Class) jobConf.getMapOutputValueClass(); RawComparator comparator = jobConf.getOutputKeyComparator(); Counter readsCounter = new Counter(); Counter writesCounter = new Counter(); Progress mergePhase = new Progress(); - RawKeyValueIterator mergeQueue = Merger.merge(conf, fs, keyClass, + RawKeyValueIterator mergeQueue = Merger.merge(jobConf, fs, keyClass, valueClass, segments, 2, tmpDir, comparator, getReporter(), readsCounter, writesCounter, mergePhase); final float epsilon = 0.00001f; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java index a3ea26e81f0b6..c8b6c894d0c4b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java @@ -31,8 +31,20 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.SleepJob; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; +import 
org.apache.hadoop.mapreduce.security.SpillCallBackPathsFinder; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.junit.Assert.*; @@ -41,8 +53,39 @@ * -jt local -libjars */ public class TestLocalJobSubmission { - private static Path TEST_ROOT_DIR = - new Path(System.getProperty("test.build.data","/tmp")); + private static final Logger LOG = + LoggerFactory.getLogger(TestLocalJobSubmission.class); + + private static File testRootDir; + + @Rule + public TestName unitTestName = new TestName(); + private File unitTestDir; + private Path jarPath; + private Configuration config; + + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir(TestLocalJobSubmission.class); + } + + @Before + public void setup() throws IOException { + unitTestDir = new File(testRootDir, unitTestName.getMethodName()); + unitTestDir.mkdirs(); + config = createConfig(); + jarPath = makeJar(new Path(unitTestDir.getAbsolutePath(), "test.jar")); + } + + private Configuration createConfig() { + // Set the temp directories a subdir of the test directory. + Configuration conf = + MRJobConfUtil.setLocalDirectoriesConfigForTesting(null, unitTestDir); + conf.set(MRConfig.FRAMEWORK_NAME, "local"); + return conf; + } /** * Test the local job submission options of -jt local -libjars. @@ -51,12 +94,9 @@ public class TestLocalJobSubmission { */ @Test public void testLocalJobLibjarsOption() throws IOException { - Configuration conf = new Configuration(); - - testLocalJobLibjarsOption(conf); - - conf.setBoolean(Job.USE_WILDCARD_FOR_LIBJARS, false); - testLocalJobLibjarsOption(conf); + testLocalJobLibjarsOption(config); + config.setBoolean(Job.USE_WILDCARD_FOR_LIBJARS, false); + testLocalJobLibjarsOption(config); } /** @@ -67,8 +107,6 @@ public void testLocalJobLibjarsOption() throws IOException { */ private void testLocalJobLibjarsOption(Configuration conf) throws IOException { - Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar")); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); conf.set(MRConfig.FRAMEWORK_NAME, "local"); final String[] args = { @@ -79,8 +117,7 @@ private void testLocalJobLibjarsOption(Configuration conf) try { res = ToolRunner.run(conf, new SleepJob(), args); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); @@ -93,18 +130,20 @@ private void testLocalJobLibjarsOption(Configuration conf) */ @Test public void testLocalJobEncryptedIntermediateData() throws IOException { - Configuration conf = new Configuration(); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + config = MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(config); final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" }; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + SpillCallBackPathsFinder spillInjector = + (SpillCallBackPathsFinder) IntermediateEncryptedStream + 
.setSpillCBInjector(new SpillCallBackPathsFinder()); + res = ToolRunner.run(config, new SleepJob(), args); + Assert.assertTrue("No spill occurred", + spillInjector.getEncryptedSpilledFiles().size() > 0); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); @@ -116,15 +155,13 @@ public void testLocalJobEncryptedIntermediateData() throws IOException { */ @Test public void testJobMaxMapConfig() throws Exception { - Configuration conf = new Configuration(); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); - conf.setInt(MRJobConfig.JOB_MAX_MAP, 0); + config.setInt(MRJobConfig.JOB_MAX_MAP, 0); final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" }; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + res = ToolRunner.run(config, new SleepJob(), args); fail("Job should fail"); } catch (IllegalArgumentException e) { assertTrue(e.getLocalizedMessage().contains( @@ -139,20 +176,16 @@ public void testJobMaxMapConfig() throws Exception { */ @Test public void testLocalJobFilesOption() throws IOException { - Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar")); - - Configuration conf = new Configuration(); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); - final String[] args = - {"-jt", "local", "-files", jarPath.toString(), "-m", "1", "-r", "1", - "-mt", "1", "-rt", "1"}; + config.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); + final String[] args = { + "-jt", "local", "-files", jarPath.toString(), + "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" + }; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + res = ToolRunner.run(config, new SleepJob(), args); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); @@ -165,27 +198,22 @@ public void testLocalJobFilesOption() throws IOException { */ @Test public void testLocalJobArchivesOption() throws IOException { - Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar")); - - Configuration conf = new Configuration(); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); + config.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); final String[] args = {"-jt", "local", "-archives", jarPath.toString(), "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"}; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + res = ToolRunner.run(config, new SleepJob(), args); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); } private Path makeJar(Path p) throws IOException { - FileOutputStream fos = new FileOutputStream(new File(p.toString())); + FileOutputStream fos = new FileOutputStream(p.toString()); JarOutputStream jos = new JarOutputStream(fos); ZipEntry ze = new ZipEntry("test.jar.inside"); jos.putNextEntry(ze); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java deleted file mode 100644 index fa8dacf6dd507..0000000000000 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java +++ /dev/null @@ -1,327 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.util.Arrays; -import java.util.Collection; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.junit.Assert.*; - -@SuppressWarnings(value={"unchecked", "deprecation"}) -/** - * This test tests the support for a merge operation in Hadoop. The input files - * are already sorted on the key. This test implements an external - * MapOutputCollector implementation that just copies the records to different - * partitions while maintaining the sort order in each partition. The Hadoop - * framework's merge on the reduce side will merge the partitions created to - * generate the final output which is sorted on the key. - */ -@RunWith(Parameterized.class) -public class TestMRIntermediateDataEncryption { - private static final Logger LOG = - LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class); - /** - * Use urandom to avoid the YarnChild process from hanging on low entropy - * systems. - */ - private static final String JVM_SECURITY_EGD_OPT = - "-Djava.security.egd=file:/dev/./urandom"; - // Where MR job's input will reside. - private static final Path INPUT_DIR = new Path("/test/input"); - // Where output goes. 
- private static final Path OUTPUT = new Path("/test/output"); - private static final int NUM_LINES = 1000; - private static MiniMRClientCluster mrCluster = null; - private static MiniDFSCluster dfsCluster = null; - private static FileSystem fs = null; - private static final int NUM_NODES = 2; - - private final String testTitle; - private final int numMappers; - private final int numReducers; - private final boolean isUber; - - /** - * List of arguments to run the JunitTest. - * @return - */ - @Parameterized.Parameters( - name = "{index}: TestMRIntermediateDataEncryption.{0} .. " - + "mappers:{1}, reducers:{2}, isUber:{3})") - public static Collection getTestParameters() { - return Arrays.asList(new Object[][]{ - {"testSingleReducer", 3, 1, false}, - {"testUberMode", 3, 1, true}, - {"testMultipleMapsPerNode", 8, 1, false}, - {"testMultipleReducers", 2, 4, false} - }); - } - - /** - * Initialized the parametrized JUnit test. - * @param testName the name of the unit test to be executed. - * @param mappers number of mappers in the tests. - * @param reducers number of the reducers. - * @param uberEnabled boolean flag for isUber - */ - public TestMRIntermediateDataEncryption(String testName, int mappers, - int reducers, boolean uberEnabled) { - this.testTitle = testName; - this.numMappers = mappers; - this.numReducers = reducers; - this.isUber = uberEnabled; - } - - @BeforeClass - public static void setupClass() throws Exception { - Configuration conf = new Configuration(); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - - // Set the jvm arguments. - conf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, - JVM_SECURITY_EGD_OPT); - final String childJVMOpts = JVM_SECURITY_EGD_OPT - + " " + conf.get("mapred.child.java.opts", " "); - conf.set("mapred.child.java.opts", childJVMOpts); - - - // Start the mini-MR and mini-DFS clusters. - dfsCluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(NUM_NODES).build(); - mrCluster = - MiniMRClientClusterFactory.create( - TestMRIntermediateDataEncryption.class, NUM_NODES, conf); - mrCluster.start(); - } - - @AfterClass - public static void tearDown() throws IOException { - if (fs != null) { - fs.close(); - } - if (mrCluster != null) { - mrCluster.stop(); - } - if (dfsCluster != null) { - dfsCluster.shutdown(); - } - } - - @Before - public void setup() throws Exception { - LOG.info("Starting TestMRIntermediateDataEncryption#{}.......", testTitle); - fs = dfsCluster.getFileSystem(); - if (fs.exists(INPUT_DIR) && !fs.delete(INPUT_DIR, true)) { - throw new IOException("Could not delete " + INPUT_DIR); - } - if (fs.exists(OUTPUT) && !fs.delete(OUTPUT, true)) { - throw new IOException("Could not delete " + OUTPUT); - } - // Generate input. 
- createInput(fs, numMappers, NUM_LINES); - } - - @After - public void cleanup() throws IOException { - if (fs != null) { - if (fs.exists(OUTPUT)) { - fs.delete(OUTPUT, true); - } - if (fs.exists(INPUT_DIR)) { - fs.delete(INPUT_DIR, true); - } - } - } - - @Test(timeout=600000) - public void testMerge() throws Exception { - JobConf job = new JobConf(mrCluster.getConfig()); - job.setJobName("Test"); - JobClient client = new JobClient(job); - RunningJob submittedJob = null; - FileInputFormat.setInputPaths(job, INPUT_DIR); - FileOutputFormat.setOutputPath(job, OUTPUT); - job.set("mapreduce.output.textoutputformat.separator", " "); - job.setInputFormat(TextInputFormat.class); - job.setMapOutputKeyClass(Text.class); - job.setMapOutputValueClass(Text.class); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(Text.class); - job.setMapperClass(TestMRIntermediateDataEncryption.MyMapper.class); - job.setPartitionerClass( - TestMRIntermediateDataEncryption.MyPartitioner.class); - job.setOutputFormat(TextOutputFormat.class); - job.setNumReduceTasks(numReducers); - job.setInt("mapreduce.map.maxattempts", 1); - job.setInt("mapreduce.reduce.maxattempts", 1); - job.setInt("mapred.test.num_lines", NUM_LINES); - job.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, isUber); - job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - submittedJob = client.submitJob(job); - submittedJob.waitForCompletion(); - assertTrue("The submitted job is completed", submittedJob.isComplete()); - assertTrue("The submitted job is successful", submittedJob.isSuccessful()); - verifyOutput(fs, numMappers, NUM_LINES); - client.close(); - // wait for short period to cool down. - Thread.sleep(1000); - } - - private void createInput(FileSystem filesystem, int mappers, int numLines) - throws Exception { - for (int i = 0; i < mappers; i++) { - OutputStream os = - filesystem.create(new Path(INPUT_DIR, "input_" + i + ".txt")); - Writer writer = new OutputStreamWriter(os); - for (int j = 0; j < numLines; j++) { - // Create sorted key, value pairs. - int k = j + 1; - String formattedNumber = String.format("%09d", k); - writer.write(formattedNumber + " " + formattedNumber + "\n"); - } - writer.close(); - os.close(); - } - } - - private void verifyOutput(FileSystem fileSystem, - int mappers, int numLines) - throws Exception { - FSDataInputStream dis = null; - long numValidRecords = 0; - long numInvalidRecords = 0; - String prevKeyValue = "000000000"; - Path[] fileList = - FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT, - new Utils.OutputFileUtils.OutputFilesFilter())); - for (Path outFile : fileList) { - try { - dis = fileSystem.open(outFile); - String record; - while((record = dis.readLine()) != null) { - // Split the line into key and value. - int blankPos = record.indexOf(" "); - String keyString = record.substring(0, blankPos); - String valueString = record.substring(blankPos+1); - // Check for sorted output and correctness of record. - if (keyString.compareTo(prevKeyValue) >= 0 - && keyString.equals(valueString)) { - prevKeyValue = keyString; - numValidRecords++; - } else { - numInvalidRecords++; - } - } - } finally { - if (dis != null) { - dis.close(); - dis = null; - } - } - } - // Make sure we got all input records in the output in sorted order. - assertEquals((long)(mappers * numLines), numValidRecords); - // Make sure there is no extraneous invalid record. - assertEquals(0, numInvalidRecords); - } - - /** - * A mapper implementation that assumes that key text contains valid integers - * in displayable form. 
- */ - public static class MyMapper extends MapReduceBase - implements Mapper { - private Text keyText; - private Text valueText; - - public MyMapper() { - keyText = new Text(); - valueText = new Text(); - } - - @Override - public void map(LongWritable key, Text value, - OutputCollector output, - Reporter reporter) throws IOException { - String record = value.toString(); - int blankPos = record.indexOf(" "); - keyText.set(record.substring(0, blankPos)); - valueText.set(record.substring(blankPos + 1)); - output.collect(keyText, valueText); - } - - public void close() throws IOException { - } - } - - /** - * Partitioner implementation to make sure that output is in total sorted - * order. We basically route key ranges to different reducers such that - * key values monotonically increase with the partition number. For example, - * in this test, the keys are numbers from 1 to 1000 in the form "000000001" - * to "000001000" in each input file. The keys "000000001" to "000000250" are - * routed to partition 0, "000000251" to "000000500" are routed to partition 1 - * and so on since we have 4 reducers. - */ - static class MyPartitioner implements Partitioner { - - private JobConf job; - - public MyPartitioner() { - } - - public void configure(JobConf job) { - this.job = job; - } - - public int getPartition(Text key, Text value, int numPartitions) { - int keyValue = 0; - try { - keyValue = Integer.parseInt(key.toString()); - } catch (NumberFormatException nfe) { - keyValue = 0; - } - int partitionNumber = (numPartitions * (Math.max(0, keyValue - 1))) / job - .getInt("mapred.test.num_lines", 10000); - return partitionNumber; - } - } -} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java index eed731ffd37e8..c2a966302cf66 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java @@ -26,6 +26,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.Test; @@ -79,7 +80,8 @@ public void doTest(int numMappers, int numReducers, int numNodes, MiniMRClientCluster mrCluster = null; FileSystem fileSystem = null; try { - Configuration conf = new Configuration(); + Configuration conf = + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); // Start the mini-MR and mini-DFS clusters conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true); conf.setBoolean(YarnConfiguration. 
@@ -149,7 +151,6 @@ private void runMergeTest(JobConf job, FileSystem fileSystem, int job.setInt("mapreduce.map.maxattempts", 1); job.setInt("mapreduce.reduce.maxattempts", 1); job.setInt("mapred.test.num_lines", numLines); - job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); try { submittedJob = client.submitJob(job); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java index a9e7f64c0b8cc..b8a16e146e9e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java @@ -87,12 +87,12 @@ public void testMerge() throws Exception { // Run the test. runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem); } finally { - if (dfsCluster != null) { - dfsCluster.shutdown(); - } if (mrCluster != null) { mrCluster.stop(); } + if (dfsCluster != null) { + dfsCluster.shutdown(); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java index 0bf30c830b22f..dca39dfd71f99 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java @@ -22,7 +22,7 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -30,7 +30,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat; import org.apache.hadoop.util.Tool; @@ -99,6 +98,15 @@ static int printUsage() { */ enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN } + public static String generateSentenceWithRand(ThreadLocalRandom rand, + int noWords) { + StringBuffer sentence = new StringBuffer(words[rand.nextInt(words.length)]); + for (int i = 1; i < noWords; i++) { + sentence.append(" ").append(words[rand.nextInt(words.length)]); + } + return sentence.toString(); + } + static class RandomTextMapper extends Mapper { private long numBytesToWrite; @@ -106,7 +114,6 @@ static class RandomTextMapper extends Mapper { private int wordsInKeyRange; private int minWordsInValue; private int wordsInValueRange; - private Random random = new Random(); /** * Save the configuration value that we need to write the data. 
@@ -127,12 +134,13 @@ public void setup(Context context) { public void map(Text key, Text value, Context context) throws IOException,InterruptedException { int itemCount = 0; + ThreadLocalRandom rand = ThreadLocalRandom.current(); while (numBytesToWrite > 0) { // Generate the key/value - int noWordsKey = minWordsInKey + - (wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0); - int noWordsValue = minWordsInValue + - (wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0); + int noWordsKey = minWordsInKey + + (wordsInKeyRange != 0 ? rand.nextInt(wordsInKeyRange) : 0); + int noWordsValue = minWordsInValue + + (wordsInValueRange != 0 ? rand.nextInt(wordsInValueRange) : 0); Text keyWords = generateSentence(noWordsKey); Text valueWords = generateSentence(noWordsValue); @@ -154,13 +162,9 @@ public void map(Text key, Text value, } private Text generateSentence(int noWords) { - StringBuffer sentence = new StringBuffer(); - String space = " "; - for (int i=0; i < noWords; ++i) { - sentence.append(words[random.nextInt(words.length)]); - sentence.append(space); - } - return new Text(sentence.toString()); + String sentence = + generateSentenceWithRand(ThreadLocalRandom.current(), noWords); + return new Text(sentence); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java new file mode 100644 index 0000000000000..79fcd4110ca27 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java @@ -0,0 +1,411 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.StringTokenizer; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; + +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.MiniMRClientCluster; +import org.apache.hadoop.mapred.MiniMRClientClusterFactory; +import org.apache.hadoop.mapred.Utils; + +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; +import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; +import org.apache.hadoop.mapreduce.security.SpillCallBackPathsFinder; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.ToolRunner; + +/** + * This class tests the support of Intermediate data encryption + * (Spill data encryption). + * It starts by generating random input text file ({@link RandomTextWriter}) + * using the {@link ToolRunner}. + * A wordCount job consumes the generated input. The final job is configured in + * a way to guarantee that data is spilled. + * mbs-per-map specifies the amount of data (in MBs) to generate per map. + * By default, this is twice the value of mapreduce.task.io.sort.mb + * map-tasks specifies the number of map tasks to run. + */ +@RunWith(Parameterized.class) +public class TestMRIntermediateDataEncryption { + /** + * The number of bytes generated by the input generator. + */ + public static final long TOTAL_MBS_DEFAULT = 128L; + public static final long BLOCK_SIZE_DEFAULT = 32 * 1024 * 1024L; + public static final int INPUT_GEN_NUM_THREADS = 16; + public static final long TASK_SORT_IO_MB_DEFAULT = 128L; + public static final String JOB_DIR_PATH = "jobs-data-path"; + private static final Logger LOG = + LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class); + /** + * Directory of the test data. + */ + private static File testRootDir; + private static volatile BufferedWriter inputBufferedWriter; + private static Configuration commonConfig; + private static MiniDFSCluster dfsCluster; + private static MiniMRClientCluster mrCluster; + private static FileSystem fs; + private static Path jobInputDirPath; + private static long inputFileSize; + /** + * Test parameters. 
+ */ + private final String testTitleName; + private final int numMappers; + private final int numReducers; + private final boolean isUber; + private Configuration config; + private Path jobOutputPath; + + /** + * Initialized the parametrized JUnit test. + * @param testName the name of the unit test to be executed. + * @param mappers number of mappers in the tests. + * @param reducers number of the reducers. + * @param uberEnabled boolean flag for isUber + */ + public TestMRIntermediateDataEncryption(String testName, int mappers, + int reducers, boolean uberEnabled) { + this.testTitleName = testName; + this.numMappers = mappers; + this.numReducers = reducers; + this.isUber = uberEnabled; + } + + /** + * List of arguments to run the JunitTest. + * @return + */ + @Parameterized.Parameters( + name = "{index}: TestMRIntermediateDataEncryption.{0} .. " + + "mappers:{1}, reducers:{2}, isUber:{3})") + public static Collection getTestParameters() { + return Arrays.asList(new Object[][]{ + {"testSingleReducer", 3, 1, false}, + {"testUberMode", 3, 1, true}, + {"testMultipleMapsPerNode", 8, 1, false}, + // TODO: The following configuration is commented out until + // MAPREDUCE-7325 is fixed. + // Setting multiple reducers breaks LocalJobRunner causing the + // unit test to fail. + // {"testMultipleReducers", 2, 4, false} + }); + } + + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir( + TestMRIntermediateDataEncryption.class); + // setup the base configurations and the clusters + final File dfsFolder = new File(testRootDir, "dfs"); + final Path jobsDirPath = new Path(JOB_DIR_PATH); + + commonConfig = createBaseConfiguration(); + dfsCluster = + new MiniDFSCluster.Builder(commonConfig, dfsFolder) + .numDataNodes(2).build(); + dfsCluster.waitActive(); + mrCluster = MiniMRClientClusterFactory.create( + TestMRIntermediateDataEncryption.class, 2, commonConfig); + mrCluster.start(); + fs = dfsCluster.getFileSystem(); + if (fs.exists(jobsDirPath) && !fs.delete(jobsDirPath, true)) { + throw new IOException("Could not delete JobsDirPath" + jobsDirPath); + } + fs.mkdirs(jobsDirPath); + jobInputDirPath = new Path(jobsDirPath, "in-dir"); + // run the input generator job. + Assert.assertEquals("Generating input should succeed", 0, + generateInputTextFile()); + } + + @AfterClass + public static void tearDown() throws IOException { + // shutdown clusters + if (mrCluster != null) { + mrCluster.stop(); + } + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + // make sure that generated input file is deleted + final File textInputFile = new File(testRootDir, "input.txt"); + if (textInputFile.exists()) { + textInputFile.delete(); + } + } + + /** + * Creates a configuration object setting the common properties before + * initializing the clusters. + * @return configuration to be used as a base for the unit tests. + */ + private static Configuration createBaseConfiguration() { + // Set the jvm arguments to enable intermediate encryption. + Configuration conf = + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); + // Set the temp directories a subdir of the test directory. + conf = MRJobConfUtil.setLocalDirectoriesConfigForTesting(conf, testRootDir); + conf.setLong("dfs.blocksize", BLOCK_SIZE_DEFAULT); + return conf; + } + + /** + * Creates a thread safe BufferedWriter to be used among the task generators. + * @return A synchronized BufferedWriter to the input file. 
+ * @throws IOException + */ + private static synchronized BufferedWriter getTextInputWriter() + throws IOException { + if (inputBufferedWriter == null) { + final File textInputFile = new File(testRootDir, "input.txt"); + inputBufferedWriter = new BufferedWriter(new FileWriter(textInputFile)); + } + return inputBufferedWriter; + } + + /** + * Generates input text file of size TOTAL_MBS_DEFAULT. + * It creates a total INPUT_GEN_NUM_THREADS future tasks. + * + * @return the result of the input generation. 0 for success. + * @throws Exception + */ + private static int generateInputTextFile() throws Exception { + final File textInputFile = new File(testRootDir, "input.txt"); + final AtomicLong actualWrittenBytes = new AtomicLong(0); + // create INPUT_GEN_NUM_THREADS callables + final ExecutorService executor = + Executors.newFixedThreadPool(INPUT_GEN_NUM_THREADS); + //create a list to hold the Future object associated with Callable + final List> inputGenerators = new ArrayList<>(); + final Callable callableGen = new InputGeneratorTask(); + final long startTime = Time.monotonicNow(); + for (int i = 0; i < INPUT_GEN_NUM_THREADS; i++) { + //submit Callable tasks to be executed by thread pool + Future genFutureTask = executor.submit(callableGen); + inputGenerators.add(genFutureTask); + } + for (Future genFutureTask : inputGenerators) { + // print the return value of Future, notice the output delay in console + // because Future.get() waits for task to get completed + LOG.info("Received one task. Current total bytes: {}", + actualWrittenBytes.addAndGet(genFutureTask.get())); + } + getTextInputWriter().close(); + final long endTime = Time.monotonicNow(); + LOG.info("Finished generating input. Wrote {} bytes in {} seconds", + actualWrittenBytes.get(), ((endTime - startTime) * 1.0) / 1000); + executor.shutdown(); + // copy text file to HDFS deleting the source. + fs.mkdirs(jobInputDirPath); + Path textInputPath = + fs.makeQualified(new Path(jobInputDirPath, "input.txt")); + fs.copyFromLocalFile(true, new Path(textInputFile.getAbsolutePath()), + textInputPath); + if (!fs.exists(textInputPath)) { + // the file was not generated. Fail. + return 1; + } + // update the input size. + FileStatus[] fileStatus = + fs.listStatus(textInputPath); + inputFileSize = fileStatus[0].getLen(); + LOG.info("Text input file; path: {}, size: {}", + textInputPath, inputFileSize); + return 0; + } + + @Before + public void setup() throws Exception { + LOG.info("Starting TestMRIntermediateDataEncryption#{}.......", + testTitleName); + final Path jobDirPath = new Path(JOB_DIR_PATH, testTitleName); + if (fs.exists(jobDirPath) && !fs.delete(jobDirPath, true)) { + throw new IOException("Could not delete " + jobDirPath); + } + fs.mkdirs(jobDirPath); + jobOutputPath = new Path(jobDirPath, "out-dir"); + // Set the configuration for the job. + config = new Configuration(commonConfig); + config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, isUber); + config.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0F); + // set the configuration to make sure that we get spilled files + long ioSortMb = TASK_SORT_IO_MB_DEFAULT; + config.setLong(MRJobConfig.IO_SORT_MB, ioSortMb); + long mapMb = Math.max(2 * ioSortMb, config.getInt(MRJobConfig.MAP_MEMORY_MB, + MRJobConfig.DEFAULT_MAP_MEMORY_MB)); + // make sure the map tasks will spill to disk. 
+ config.setLong(MRJobConfig.MAP_MEMORY_MB, mapMb); + config.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx" + (mapMb - 200) + "m"); + config.setInt(MRJobConfig.NUM_MAPS, numMappers); + // max attempts have to be set to 1 when intermediate encryption is enabled. + config.setInt("mapreduce.map.maxattempts", 1); + config.setInt("mapreduce.reduce.maxattempts", 1); + } + + @Test + public void testWordCount() throws Exception { + LOG.info("........Starting main Job Driver #{} starting at {}.......", + testTitleName, Time.formatTime(System.currentTimeMillis())); + Job job = Job.getInstance(config); + job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, numMappers); + job.setJarByClass(TestMRIntermediateDataEncryption.class); + job.setJobName("mr-spill-" + testTitleName); + // Mapper configuration + job.setMapperClass(TokenizerMapper.class); + job.setInputFormatClass(TextInputFormat.class); + job.setCombinerClass(LongSumReducer.class); + FileInputFormat.setMinInputSplitSize(job, + (inputFileSize + numMappers) / numMappers); + // Reducer configuration + job.setReducerClass(LongSumReducer.class); + job.setNumReduceTasks(numReducers); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(LongWritable.class); + // Set the IO paths for the job. + FileInputFormat.addInputPath(job, jobInputDirPath); + FileOutputFormat.setOutputPath(job, jobOutputPath); + SpillCallBackPathsFinder spillInjector = + (SpillCallBackPathsFinder) IntermediateEncryptedStream + .setSpillCBInjector(new SpillCallBackPathsFinder()); + StringBuilder testSummary = + new StringBuilder(String.format("%n ===== test %s summary ======", + testTitleName)); + try { + long startTime = Time.monotonicNow(); + testSummary.append(String.format("%nJob %s ended at %s", + testTitleName, Time.formatTime(System.currentTimeMillis()))); + Assert.assertTrue(job.waitForCompletion(true)); + long endTime = Time.monotonicNow(); + testSummary.append(String.format("%nJob %s ended at %s", + job.getJobName(), Time.formatTime(System.currentTimeMillis()))); + testSummary.append(String.format("%n\tThe job took %.3f seconds", + (1.0 * (endTime - startTime)) / 1000)); + long spilledRecords = + job.getCounters().findCounter(TaskCounter.SPILLED_RECORDS).getValue(); + Assert.assertFalse( + "The encrypted spilled files should not be empty.", + spillInjector.getEncryptedSpilledFiles().isEmpty()); + Assert.assertTrue("Spill records must be greater than 0", + spilledRecords > 0); + Assert.assertTrue("Job Output path [" + jobOutputPath + "] should exist", + fs.exists(jobOutputPath)); + Assert.assertTrue("Invalid access to spill file positions", + spillInjector.getInvalidSpillEntries().isEmpty()); + FileStatus[] fileStatus = + fs.listStatus(jobOutputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + for (FileStatus fStatus : fileStatus) { + long fileSize = fStatus.getLen(); + testSummary.append( + String.format("%n\tOutput file %s: %d", + fStatus.getPath(), fileSize)); + } + } finally { + testSummary.append(spillInjector.getSpilledFileReport()); + LOG.info(testSummary.toString()); + IntermediateEncryptedStream.resetSpillCBInjector(); + } + } + + /** + * A callable implementation that generates a portion of the + * TOTAL_MBS_DEFAULT into {@link #inputBufferedWriter}. 
+ */ + static class InputGeneratorTask implements Callable { + @Override + public Long call() throws Exception { + long bytesWritten = 0; + final ThreadLocalRandom rand = ThreadLocalRandom.current(); + final long totalBytes = 1024 * 1024 * TOTAL_MBS_DEFAULT; + final long bytesPerTask = totalBytes / INPUT_GEN_NUM_THREADS; + final String newLine = System.lineSeparator(); + final BufferedWriter writer = getTextInputWriter(); + while (bytesWritten < bytesPerTask) { + String sentence = + RandomTextWriter.generateSentenceWithRand(rand, rand.nextInt(5, 20)) + .concat(newLine); + writer.write(sentence); + bytesWritten += sentence.length(); + } + writer.flush(); + LOG.info("Task {} finished. Wrote {} bytes.", + Thread.currentThread().getName(), bytesWritten); + return bytesWritten; + } + } + + /** + * A Test tokenizer Mapper. + */ + public static class TokenizerMapper + extends Mapper { + + private final static LongWritable ONE = new LongWritable(1); + private final Text word = new Text(); + + public void map(Object key, Text value, + Context context) throws IOException, InterruptedException { + StringTokenizer itr = new StringTokenizer(value.toString()); + while (itr.hasMoreTokens()) { + word.set(itr.nextToken()); + context.write(word, ONE); + } + } + } +} From ef8bd3007342f2b41d39a3c72f9cbb4e22d2fb9f Mon Sep 17 00:00:00 2001 From: zhuqi Date: Tue, 16 Mar 2021 12:50:29 +0800 Subject: [PATCH 103/666] YARN-10689. Fix the finding bugs in extractFloatValueFromWeightConfig. (#2760) Signed-off-by: Akira Ajisaka --- .../scheduler/capacity/CapacitySchedulerConfiguration.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 5b2906062fc56..ce3c0cb3e0cbd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -775,7 +775,7 @@ private float extractFloatValueFromWeightConfig(String configureValue) { if (!configuredWeightAsCapacity(configureValue)) { return -1f; } else { - return Float.valueOf( + return Float.parseFloat( configureValue.substring(0, configureValue.indexOf(WEIGHT_SUFFIX))); } } From 296303eaa99cc0ce56cd7ded2e1dd0908928e4ba Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 16 Mar 2021 15:23:27 +0100 Subject: [PATCH 104/666] YARN-10682. The scheduler monitor policies conf should trim values separated by comma. Contributed by Qi Zhu. 
--- .../resourcemanager/monitor/SchedulingMonitorManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java index 5418e8b1f8b1f..f3c10ea0b71ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java @@ -61,7 +61,7 @@ private void updateSchedulingMonitors(Configuration conf, } // When monitor is enabled, loading policies - String[] configuredPolicies = conf.getStrings( + String[] configuredPolicies = conf.getTrimmedStrings( YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES); if (configuredPolicies == null || configuredPolicies.length == 0) { return; From cd8c907fbc9ca601303e3b786144c028e5b6dfba Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Tue, 16 Mar 2021 16:09:12 +0100 Subject: [PATCH 105/666] YARN-10686. Fix TestCapacitySchedulerAutoQueueCreation#testAutoQueueCreationFailsForEmptyPathWithAQCAndWeightMode. Contributed by Qi Zhu. --- .../capacity/TestCapacitySchedulerAutoQueueCreation.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java index be41a3307cd95..ece13c90bdd94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java @@ -571,7 +571,7 @@ public void testAutoQueueCreationFailsForEmptyPathWithAQCAndWeightMode() ApplicationId appId = BuilderUtils.newApplicationId(1, 1); SchedulerEvent addAppEvent = new AppAddedSchedulerEvent( - appId, "user", "root."); + appId, "user", USER0); newCS.handle(addAppEvent); RMAppEvent event = new RMAppEvent(appId, RMAppEventType.APP_REJECTED, From db0c7cad23b41e6da97e5e43ae2cebc247df1edf Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 16 Mar 2021 21:36:51 +0530 Subject: [PATCH 106/666] HDFS-15843. 
Make write cross-platform (#2710) --- .../examples/c/connect_cancel/CMakeLists.txt | 5 +- .../c/connect_cancel/connect_cancel.c | 8 +-- .../examples/cc/connect_cancel/CMakeLists.txt | 3 +- .../cc/connect_cancel/connect_cancel.cc | 12 ++-- .../libhdfspp/lib/x-platform/CMakeLists.txt | 9 ++- .../native/libhdfspp/lib/x-platform/c_api.cc | 23 ++++++++ .../native/libhdfspp/lib/x-platform/c_api.h | 28 ++++++++++ .../native/libhdfspp/lib/x-platform/syscall.h | 56 +++++++++++++++++++ .../libhdfspp/lib/x-platform/syscall_linux.cc | 37 ++++++++++++ .../lib/x-platform/syscall_windows.cc | 42 ++++++++++++++ .../native/libhdfspp/lib/x-platform/utils.cc | 2 +- 11 files changed, 210 insertions(+), 15 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt index 6276467f5ff7c..f645254d6b8fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt @@ -23,5 +23,6 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX}) include_directories( ${LIBHDFSPP_DIR}/include ) link_directories( ${LIBHDFSPP_DIR}/lib ) -add_executable(connect_cancel_c connect_cancel.c) -target_link_libraries(connect_cancel_c hdfspp_static uriparser2) \ No newline at end of file +add_executable(connect_cancel_c $ connect_cancel.c) +target_link_libraries(connect_cancel_c hdfspp_static uriparser2) +target_include_directories(connect_cancel_c PRIVATE ../../lib) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c index f6af6d13d6513..6f1bbfe84e0c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c @@ -26,10 +26,10 @@ #include #include #include -#include #include "hdfspp/hdfs_ext.h" #include "common/util_c.h" +#include "x-platform/c_api.h" #define ERROR_BUFFER_SIZE 1024 @@ -43,10 +43,10 @@ const char *catch_exit = "Exiting the signal handler.\n"; // Print to stdout without calling malloc or otherwise indirectly modify userspace state. // Write calls to stdout may still interleave with stuff coming from elsewhere. 
static void sighandler_direct_stdout(const char *msg) { - if(!msg) + if(!msg) { return; - ssize_t res = write(1 /*posix stdout fd*/, msg, strlen(msg)); - (void)res; + } + x_platform_syscall_write_to_stdout(msg); } static void sig_catch(int val) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt index e3cc0b51e86f5..c4bc7994c179c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt @@ -23,5 +23,6 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX}) include_directories( ${LIBHDFSPP_DIR}/include ) link_directories( ${LIBHDFSPP_DIR}/lib ) -add_executable(connect_cancel connect_cancel.cc) +add_executable(connect_cancel $ connect_cancel.cc) target_link_libraries(connect_cancel hdfspp_static) +target_include_directories(connect_cancel PRIVATE ../../lib) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc index 46eef9d897a7f..f409c48d07c8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/connect_cancel.cc @@ -25,11 +25,12 @@ #include #include -#include #include #include +#include "x-platform/syscall.h" + // Simple example of how to cancel an async connect call. // Here Control-C (SIGINT) is caught in order to invoke the FS level cancel and // properly tear down the process. Valgrind should show no leaked memory on exit @@ -47,11 +48,10 @@ const std::string catch_exit("Exiting the signal handler.\n"); // It's possible that the write interleaves with another write call, // but it won't corrupt the stack or heap. static void sighandler_direct_stdout(const std::string &msg) { - ssize_t res = ::write(1 /*posix stdout FD*/, msg.data(), msg.size()); - // In production you'd want to check res, but error handling code will - // need to be fairly application specific if it's going to properly - // avoid reentrant calls to malloc. - (void)res; + XPlatform::Syscall::WriteToStdout(msg); + // In production you'd want to check the result of the above call, + // but error handling code will need to be fairly application + // specific if it's going to properly avoid reentrant calls to malloc. } // Signal handler to make a SIGINT call cancel rather than exit(). diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt index c9bc3df282408..61deb317ff738 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt @@ -16,4 +16,11 @@ # limitations under the License. 
# -add_library(x_platform_utils_obj OBJECT utils.cc) \ No newline at end of file +if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows") + set(SYSCALL_SRC syscall_windows.cc) +else() + set(SYSCALL_SRC syscall_linux.cc) +endif() + +add_library(x_platform_utils_obj OBJECT ${SYSCALL_SRC} utils.cc) +add_library(x_platform_utils_obj_c_api OBJECT $ c_api.cc) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc new file mode 100644 index 0000000000000..b905d2fbb07fb --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "syscall.h" + +extern "C" int x_platform_syscall_write_to_stdout(const char* msg) { + return XPlatform::Syscall::WriteToStdout(msg) ? 1 : 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h new file mode 100644 index 0000000000000..4bc7008cd9a2e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_H +#define NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_H + +/** + * C APIs for accessing XPlatform + */ + +int x_platform_syscall_write_to_stdout(const char* msg); + +#endif // NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_H diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h new file mode 100644 index 0000000000000..d162f6ff5137b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_SYSCALL +#define NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_SYSCALL + +#include + +/** + * The {@link XPlatform} namespace contains components that + * aid in writing cross-platform code. + */ +namespace XPlatform { +class Syscall { + public: + /** + * Writes the given string to the application's + * standard output stream. + * + * @param message The string to write to stdout. + * @returns A boolean indicating whether the write + * was successful. + */ + static bool WriteToStdout(const std::string& message); + + /** + * Writes the given char pointer to the application's + * standard output stream. + * + * @param message The char pointer to write to stdout. + * @returns A boolean indicating whether the write + * was successful. + */ + static int WriteToStdout(const char* message); + + private: + static bool WriteToStdoutImpl(const char* message); +}; +} // namespace XPlatform + +#endif diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc new file mode 100644 index 0000000000000..e556d9976e8ad --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include "syscall.h" + +bool XPlatform::Syscall::WriteToStdout(const std::string& message) { + return WriteToStdoutImpl(message.c_str()); +} + +int XPlatform::Syscall::WriteToStdout(const char* message) { + return WriteToStdoutImpl(message) ? 1 : 0; +} + +bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { + const auto message_len = strlen(message); + const auto result = write(1, message, message_len); + return result == static_cast(message_len); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc new file mode 100644 index 0000000000000..06b0031d88033 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "syscall.h" + +bool XPlatform::Syscall::WriteToStdout(const std::string& message) { + return WriteToStdoutImpl(message.c_str()); +} + +int XPlatform::Syscall::WriteToStdout(const char* message) { + return WriteToStdoutImpl(message) ? 1 : 0; +} + +bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { + auto* const stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + if (stdout_handle == INVALID_HANDLE_VALUE || stdout_handle == nullptr) { + return false; + } + + unsigned long bytes_written = 0; + const auto message_len = lstrlen(message); + const auto result = + WriteFile(stdout_handle, message, message_len, &bytes_written, nullptr); + return result && static_cast(message_len) == bytes_written; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/utils.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/utils.cc index 04c5a238a8c31..461b9f828f4a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/utils.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/utils.cc @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -#include "x-platform/utils.h" +#include "utils.h" #include #include From de0686a0ab0e5bd089dab36e41340b342c30e063 Mon Sep 17 00:00:00 2001 From: Wilfred Spiegelenburg Date: Wed, 17 Mar 2021 10:47:49 +1100 Subject: [PATCH 107/666] YARN-10652. Capacity Scheduler fails to handle user weights for a user that has a "." (dot) in it This only fixes the user name resolution for weights in the queues. It does not add generic support for user names with dots in all use cases in the capacity scheduler. Contributed by: Siddharth Ahuja --- .../capacity/CapacitySchedulerConfiguration.java | 2 +- .../scheduler/capacity/TestLeafQueue.java | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index ce3c0cb3e0cbd..efd56a2491966 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -1963,7 +1963,7 @@ public Map getAllUserWeightsForQueue(String queuePath) { getQueuePrefix(queuePath).replaceAll("\\.", "\\\\.") + USER_SETTINGS + "\\."; String weightKeyRegex = - qPathPlusPrefix + "\\w+\\." + USER_WEIGHT; + qPathPlusPrefix + "\\S+\\." + USER_WEIGHT; Map props = getValByRegex(weightKeyRegex); for (Entry e : props.entrySet()) { String userName = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 4b023107b39cb..75ab16f3a8460 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -1660,17 +1660,25 @@ public void testUserSpecificUserLimits() throws Exception { LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A)); // Set minimum-user-limit-percent for queue "a" in the configs. csConf.setUserLimit(a.getQueuePath(), 50); - // Set weight for "user_0" to be 1.5 for the a queue in the configs. + // Set weight for "user_0" to be 1.5f for the a queue in the configs. csConf.setFloat("yarn.scheduler.capacity." + a.getQueuePath() + ".user-settings.user_0." + CapacitySchedulerConfiguration.USER_WEIGHT, 1.5f); + // Set weight for "firstname.lastname" to be 0.7f for the a queue + // in the configs. Notice the user contains a dot. This is to test + // that weights are accepted for a username that contains dots. + csConf.setFloat("yarn.scheduler.capacity." 
+ a.getQueuePath() + + ".user-settings.firstname.lastname." + + CapacitySchedulerConfiguration.USER_WEIGHT, + 0.7f); when(csContext.getClusterResource()) .thenReturn(Resources.createResource(16 * GB, 32)); // Verify that configs were updated and parsed correctly. Assert.assertNull(a.getUserWeights().get("user_0")); a.reinitialize(a, csContext.getClusterResource()); - assertEquals(1.5, a.getUserWeights().get("user_0").floatValue(), 0.0); + assertEquals(1.5f, a.getUserWeights().get("user_0"), 0.0f); + assertEquals(0.7f, a.getUserWeights().get("firstname.lastname"), 0.0f); // set maxCapacity a.setMaxCapacity(1.0f); From 977bf27473d5086a74b8ace0244ebbfd8e65d8e7 Mon Sep 17 00:00:00 2001 From: Hui Fei Date: Wed, 17 Mar 2021 09:11:30 +0800 Subject: [PATCH 108/666] HDFS-15898. Test case TestOfflineImageViewer fails (#2779) --- .../tools/offlineImageViewer/TestOfflineImageViewer.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 6ac12accff79c..7bf3bfc1f8e84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -74,6 +74,7 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -153,6 +154,7 @@ public class TestOfflineImageViewer { private static final long FILE_NODE_ID_3 = 16394; private static final long DIR_NODE_ID = 16391; private static final long SAMPLE_TIMESTAMP = 946684800000L; + private static TimeZone defaultTimeZone = null; // namespace as written to dfs, to be compared with viewer's output final static HashMap writtenFiles = Maps.newHashMap(); @@ -165,6 +167,8 @@ public class TestOfflineImageViewer { // multiple tests. @BeforeClass public static void createOriginalFSImage() throws IOException { + defaultTimeZone = TimeZone.getDefault(); + TimeZone.setDefault(TimeZone.getTimeZone("UTC")); File[] nnDirs = MiniDFSCluster.getNameNodeDirectory( MiniDFSCluster.getBaseDirectory(), 0, 0); tempDir = nnDirs[0]; @@ -370,6 +374,9 @@ public static void deleteOriginalFSImage() throws IOException { if (originalFsimage != null && originalFsimage.exists()) { originalFsimage.delete(); } + if (defaultTimeZone != null) { + TimeZone.setDefault(defaultTimeZone); + } } // Convenience method to generate a file status from file system for From 7b6eb640125c0f47db558c3a92256c6fce0c550f Mon Sep 17 00:00:00 2001 From: touchida <56789230+touchida@users.noreply.github.com> Date: Wed, 17 Mar 2021 12:00:05 +0900 Subject: [PATCH 109/666] HDFS-13975. 
TestBalancer#testMaxIterationTime fails sporadically (#2726) Co-authored-by: toshihiko.uchida Signed-off-by: Akira Ajisaka --- .../hdfs/server/balancer/TestBalancer.java | 32 +++++++------------ 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 9f65ffa1fea45..b94cebc8d492a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1610,9 +1610,9 @@ public void testMaxIterationTime() throws Exception { conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); // limit the worker thread count of Balancer to have only 1 queue per DN conf.setInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY, 1); - // limit the bandwitdh to 1 packet per sec to emulate slow block moves + // limit the bandwidth to 4MB per sec to emulate slow block moves conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, - 64 * 1024); + 4 * 1024 * 1024); // set client socket timeout to have an IN_PROGRESS notification back from // the DataNode about the copy in every second. conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2000L); @@ -1643,31 +1643,21 @@ public void testMaxIterationTime() throws Exception { List connectors = Collections.emptyList(); try { BalancerParameters bParams = BalancerParameters.DEFAULT; + // set maxIdleIterations to 1 for NO_MOVE_PROGRESS to be + // reported when there is no block move connectors = NameNodeConnector.newNameNodeConnectors( DFSUtil.getInternalNsRpcUris(conf), Balancer.class.getSimpleName(), - Balancer.BALANCER_ID_PATH, conf, bParams.getMaxIdleIteration()); + Balancer.BALANCER_ID_PATH, conf, 1); for (NameNodeConnector nnc : connectors) { LOG.info("NNC to work on: " + nnc); Balancer b = new Balancer(nnc, bParams, conf); - long startTime = Time.monotonicNow(); Result r = b.runOneIteration(); - long runtime = Time.monotonicNow() - startTime; - assertEquals("We expect ExitStatus.IN_PROGRESS to be reported.", - ExitStatus.IN_PROGRESS, r.exitStatus); - // accept runtime if it is under 3.5 seconds, as we need to wait for - // IN_PROGRESS report from DN, and some spare to be able to finish. - // NOTE: This can be a source of flaky tests, if the box is busy, - // assertion here is based on the following: Balancer is already set - // up, iteration gets the blocks from the NN, and makes the decision - // to move 2 blocks. After that the PendingMoves are scheduled, and - // DataNode heartbeats in for the Balancer every second, iteration is - // two seconds long. This means that it will fail if the setup and the - // heartbeat from the DataNode takes more than 500ms, as the iteration - // should end at the 3rd second from start. As the number of - // operations seems to be pretty low, and all comm happens locally, I - // think the possibility of a failure due to node busyness is low. - assertTrue("Unexpected iteration runtime: " + runtime + "ms > 3.5s", - runtime < 3500); + // Since no block cannot be moved in 2 seconds (i.e., + // 4MB/s * 2s = 8MB < 10MB), NO_MOVE_PROGRESS will be reported. + // When a block move is not canceled in 2 seconds properly and then + // a block is moved unexpectedly, IN_PROGRESS will be reported. 
+ assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.", + ExitStatus.NO_MOVE_PROGRESS, r.exitStatus); } } finally { for (NameNodeConnector nnc : connectors) { From 3ac288d9cf63f67e83dca18f525be6bd26335daf Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Wed, 17 Mar 2021 14:14:26 +0900 Subject: [PATCH 110/666] HDFS-15890. Improve the Logs for File Concat Operation. Contributed by Bhavik Patel. --- .../hdfs/server/namenode/FSDirConcatOp.java | 17 +++++------------ .../hdfs/server/namenode/NameNodeRpcServer.java | 2 ++ 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java index 843f1a5464da3..04ae358c67afb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java @@ -52,9 +52,9 @@ static FileStatus concat(FSDirectory fsd, FSPermissionChecker pc, String target, String[] srcs, boolean logRetryCache) throws IOException { validatePath(target, srcs); assert srcs != null; - if (FSDirectory.LOG.isDebugEnabled()) { - FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target); - } + NameNode.stateChangeLog.debug("DIR* NameSystem.concat: {} to {}", + Arrays.toString(srcs), target); + final INodesInPath targetIIP = fsd.resolvePath(pc, target, DirOp.WRITE); // write permission for the target if (fsd.isPermissionEnabled()) { @@ -66,11 +66,6 @@ static FileStatus concat(FSDirectory fsd, FSPermissionChecker pc, // check the srcs INodeFile[] srcFiles = verifySrcFiles(fsd, srcs, targetIIP, pc); - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " + - Arrays.toString(srcs) + " to " + target); - } - long timestamp = now(); fsd.writeLock(); try { @@ -234,10 +229,8 @@ private static void verifyQuota(FSDirectory fsd, INodesInPath targetIIP, static void unprotectedConcat(FSDirectory fsd, INodesInPath targetIIP, INodeFile[] srcList, long timestamp) throws IOException { assert fsd.hasWriteLock(); - if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to " - + targetIIP.getPath()); - } + NameNode.stateChangeLog.debug("DIR* NameSystem.concat to {}", + targetIIP.getPath()); final INodeFile trgInode = targetIIP.getLastINode().asFile(); QuotaCounts deltas = computeQuotaDeltas(fsd, trgInode, srcList); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index cfb0d444051f9..0767a18b9ffed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1045,6 +1045,8 @@ public boolean rename(String src, String dst) throws IOException { @Override // ClientProtocol public void concat(String trg, String[] src) throws IOException { checkNNStartup(); + stateChangeLog.debug("*DIR* NameNode.concat: src path {} to" + + " target path {}", Arrays.toString(src), trg); namesystem.checkOperation(OperationCategory.WRITE); CacheEntry cacheEntry = 
RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { From 0523f7bedf2f5aaf3321cebe3e479a76ea72935c Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 17 Mar 2021 11:42:36 +0530 Subject: [PATCH 111/666] HDFS-15895 : Remove redundant String#format in DFSAdmin#printOpenFiles (#2772) --- .../main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index f8ff5fe99392a..c874d9290fa8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -1024,14 +1024,14 @@ public int listOpenFiles(String[] argv) throws IOException { private void printOpenFiles(RemoteIterator openFilesIterator) throws IOException { - System.out.println(String.format("%-20s\t%-20s\t%s", "Client Host", - "Client Name", "Open File Path")); + System.out.printf("%-20s\t%-20s\t%s%n", "Client Host", + "Client Name", "Open File Path"); while (openFilesIterator.hasNext()) { OpenFileEntry openFileEntry = openFilesIterator.next(); - System.out.println(String.format("%-20s\t%-20s\t%20s", + System.out.printf("%-20s\t%-20s\t%20s%n", openFileEntry.getClientMachine(), openFileEntry.getClientName(), - openFileEntry.getFilePath())); + openFileEntry.getFilePath()); } } From f33134c9433b073af7891d0d065d9af47a4bebfa Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Wed, 17 Mar 2021 13:38:20 +0100 Subject: [PATCH 112/666] YARN-10497. Fix an issue in CapacityScheduler which fails to delete queues. Contributed by Wangda Tan and Qi Zhu. --- .../conf/MutableCSConfigurationProvider.java | 12 +++++-- .../TestMutableCSConfigurationProvider.java | 33 +++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java index 5f262be83d346..b74414b93c4f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java @@ -68,10 +68,18 @@ public MutableCSConfigurationProvider(RMContext rmContext) { this.rmContext = rmContext; } + // Unit test can overwrite this method + protected Configuration getInitSchedulerConfig() { + Configuration initialSchedConf = new Configuration(false); + initialSchedConf. 
+ addResource(YarnConfiguration.CS_CONFIGURATION_FILE); + return initialSchedConf; + } + @Override public void init(Configuration config) throws IOException { this.confStore = YarnConfigurationStoreFactory.getStore(config); - Configuration initialSchedConf = new Configuration(false); + Configuration initialSchedConf = getInitSchedulerConfig(); initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE); this.schedConf = new Configuration(false); // We need to explicitly set the key-values in schedConf, otherwise @@ -231,7 +239,7 @@ private List getSiblingQueues(String queuePath, Configuration conf) { String childQueuesKey = CapacitySchedulerConfiguration.PREFIX + parentQueue + CapacitySchedulerConfiguration.DOT + CapacitySchedulerConfiguration.QUEUES; - return new ArrayList<>(conf.getStringCollection(childQueuesKey)); + return new ArrayList<>(conf.getTrimmedStringCollection(childQueuesKey)); } private Map constructKeyValueConfUpdate( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java index 0c9a3120566fe..aec2c21956471 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java @@ -217,6 +217,39 @@ public void testHDFSBackedProvider() throws Exception { } + @Test + public void testAddRemoveQueueWithSpacesInConfig() throws Exception { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {" a , b, c" }); + + final String a = CapacitySchedulerConfiguration.ROOT + ".a"; + final String b = CapacitySchedulerConfiguration.ROOT + ".b"; + final String c = CapacitySchedulerConfiguration.ROOT + ".c"; + csConf.setCapacity(a, 0); + csConf.setCapacity(b, 50); + csConf.setCapacity(c, 50); + + confProvider = new MutableCSConfigurationProvider(rmContext) { + @Override + protected Configuration getInitSchedulerConfig() { + return csConf; + } + }; + + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS, + YarnConfiguration.MEMORY_CONFIGURATION_STORE); + confProvider.init(conf); + + SchedConfUpdateInfo update = new SchedConfUpdateInfo(); + update.getRemoveQueueInfo().add("root.a"); + + confProvider.logAndApplyMutation(UserGroupInformation + .getCurrentUser(), update); + } + private void writeConf(Configuration conf, String storePath) throws IOException { FileSystem fileSystem = FileSystem.get(new Configuration(conf)); From 7f6c37f3d7f64cecae0e56c90369d0abbaabb21a Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Wed, 17 Mar 2021 10:57:11 -0700 Subject: [PATCH 113/666] =?UTF-8?q?HADOOP-17578.=20Improve=20UGI=20debug?= =?UTF-8?q?=20log=20to=20help=20troubleshooting=20TokenCach=E2=80=A6=20(#2?= =?UTF-8?q?762)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
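As a quick, hedged illustration of the logging pattern this UGI change adopts (the sketch below is not part of the patch, the wrapper class name is invented, and the generic types are reconstructed from the diff), the idea is to iterate the credential token map and rely on SLF4J parameterized messages, so that the service alias of every token is visible and nothing is formatted unless DEBUG is enabled:

import java.util.Map;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class TokenDebugLogSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(TokenDebugLogSketch.class);

  static void logTokenMap(String caption, UserGroupInformation ugi) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("{} UGI: {}", caption, ugi);
      // Logging map entries surfaces the service alias (key) each token is
      // stored under, which a plain token listing does not show.
      for (Map.Entry<Text, Token<? extends TokenIdentifier>> kv
          : ugi.getCredentials().getTokenMap().entrySet()) {
        LOG.debug("+token: {} -> {}", kv.getKey(), kv.getValue());
      }
    }
  }
}

Logging the map entries rather than the bare token list is what makes TokenCache issues easier to trace, since the alias a token was stored under is exactly the key that the credential lookup in collectDelegationTokens searches for.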
.../hadoop/security/UserGroupInformation.java | 7 ++++--- .../security/token/DelegationTokenIssuer.java | 17 ++++++++++++++++- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index f5007588036de..c3d7a55a10e6e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -1925,11 +1925,12 @@ public T doAs(PrivilegedExceptionAction action @InterfaceAudience.LimitedPrivate({"HDFS", "KMS"}) @InterfaceStability.Unstable public static void logUserInfo(Logger log, String caption, - UserGroupInformation ugi) throws IOException { + UserGroupInformation ugi) { if (log.isDebugEnabled()) { log.debug(caption + " UGI: " + ugi); - for (Token token : ugi.getTokens()) { - log.debug("+token:" + token); + for (Map.Entry> kv : + ugi.getCredentials().getTokenMap().entrySet()) { + log.debug("+token: {} -> {}", kv.getKey(), kv.getValue()); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java index 70a53b7166870..7b0a78bcd3c0d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DelegationTokenIssuer.java @@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; @@ -32,7 +34,7 @@ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Yarn"}) @InterfaceStability.Unstable public interface DelegationTokenIssuer { - + Logger TOKEN_LOG = LoggerFactory.getLogger(DelegationTokenIssuer.class); /** * The service name used as the alias for the token in the credential * token map. addDelegationTokens will use this to determine if @@ -88,15 +90,28 @@ static void collectDelegationTokens( final List> tokens) throws IOException { final String serviceName = issuer.getCanonicalServiceName(); // Collect token of the this issuer and then of its embedded children + if (TOKEN_LOG.isDebugEnabled()) { + TOKEN_LOG.debug("Search token for service {} in credentials", + serviceName); + } if (serviceName != null) { final Text service = new Text(serviceName); Token token = credentials.getToken(service); if (token == null) { + if (TOKEN_LOG.isDebugEnabled()) { + TOKEN_LOG.debug("Token for service {} not found in credentials," + + " try getDelegationToken.", serviceName); + } token = issuer.getDelegationToken(renewer); if (token != null) { tokens.add(token); credentials.addToken(service, token); } + } else { + if (TOKEN_LOG.isDebugEnabled()) { + TOKEN_LOG.debug("Token for service {} found in credentials," + + "skip getDelegationToken.", serviceName); + } } } // Now collect the tokens from the children. From 139b595b618343b04346e22b5a9ee72041ae0217 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Wed, 17 Mar 2021 18:11:37 +0000 Subject: [PATCH 114/666] YARN-10688. 
ClusterMetrics should support GPU capacity related metrics.. Contributed by Qi Zhu. --- .../resourcemanager/ClusterMetrics.java | 27 +++++++- .../TestCSAllocateCustomResource.java | 64 +++++++++++++++++++ 2 files changed, 89 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java index 37f4ec436daaa..7fe5cc9703b1b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java @@ -34,6 +34,8 @@ import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; @InterfaceAudience.Private @Metrics(context="yarn") @@ -56,13 +58,14 @@ public class ClusterMetrics { @Metric("Vcore Utilization") MutableGaugeLong utilizedVirtualCores; @Metric("Memory Capability") MutableGaugeLong capabilityMB; @Metric("Vcore Capability") MutableGaugeLong capabilityVirtualCores; + @Metric("GPU Capability") MutableGaugeLong capabilityGPUs; private static final MetricsInfo RECORD_INFO = info("ClusterMetrics", "Metrics for the Yarn Cluster"); private static volatile ClusterMetrics INSTANCE = null; private static MetricsRegistry registry; - + public static ClusterMetrics getMetrics() { if(!isInitialized.get()){ synchronized (ClusterMetrics.class) { @@ -206,10 +209,24 @@ public long getCapabilityVirtualCores() { return capabilityVirtualCores.value(); } + public long getCapabilityGPUs() { + if (capabilityGPUs == null) { + return 0; + } + + return capabilityGPUs.value(); + } + public void incrCapability(Resource res) { if (res != null) { capabilityMB.incr(res.getMemorySize()); capabilityVirtualCores.incr(res.getVirtualCores()); + Integer gpuIndex = ResourceUtils.getResourceTypeIndex() + .get(ResourceInformation.GPU_URI); + if (gpuIndex != null) { + capabilityGPUs.incr(res. + getResourceValue(ResourceInformation.GPU_URI)); + } } } @@ -217,6 +234,12 @@ public void decrCapability(Resource res) { if (res != null) { capabilityMB.decr(res.getMemorySize()); capabilityVirtualCores.decr(res.getVirtualCores()); + Integer gpuIndex = ResourceUtils.getResourceTypeIndex() + .get(ResourceInformation.GPU_URI); + if (gpuIndex != null) { + capabilityGPUs.decr(res. 
+ getResourceValue(ResourceInformation.GPU_URI)); + } } } @@ -251,4 +274,4 @@ public long getUtilizedVirtualCores() { public void incrUtilizedVirtualCores(long delta) { utilizedVirtualCores.incr(delta); } -} +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java index 65473b9eea7d8..d6f15446441d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java @@ -22,18 +22,22 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ClusterNodeTracker; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; @@ -47,8 +51,12 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB; +import static org.junit.Assert.assertEquals; /** * Test case for custom resource container allocation. 
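Stepping briefly outside the diff: the core of the ClusterMetrics change in this patch is a guard that only touches the GPU gauge when the yarn.io/gpu resource type is actually registered. A minimal sketch of that guard follows, using only calls that appear in the patch; the helper class and method names are illustrative, not part of the change:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;

final class GpuCapabilitySketch {
  // Returns the GPU capability carried by a node resource, or 0 when the
  // cluster has no yarn.io/gpu resource type configured.
  static long gpuCapability(Resource res) {
    Integer gpuIndex = ResourceUtils.getResourceTypeIndex()
        .get(ResourceInformation.GPU_URI);
    if (gpuIndex == null) {
      // GPU is not a registered resource type; skip the lookup entirely
      // rather than asking the Resource object for a value it may not carry.
      return 0;
    }
    return res.getResourceValue(ResourceInformation.GPU_URI);
  }
}

Both incrCapability() and decrCapability() in the patch apply the same null check, so clusters without a GPU resource type keep reporting a capability of 0 instead of querying a resource their Resource objects do not have.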
@@ -64,6 +72,9 @@ public class TestCSAllocateCustomResource { private final int g = 1024; + private ClusterNodeTracker nodeTracker; + private ClusterMetrics metrics; + @Before public void setUp() throws Exception { conf = new YarnConfiguration(); @@ -182,4 +193,57 @@ public void testCapacitySchedulerInitWithCustomResourceType() .getResourceValue("yarn.io/gpu")); rm.close(); } + + @Test + public void testClusterMetricsWithGPU() + throws Exception { + metrics = ClusterMetrics.getMetrics(); + // reset resource types + ResourceUtils.resetResourceTypes(); + String resourceTypesFileName = "resource-types-test.xml"; + File source = new File( + conf.getClassLoader().getResource(resourceTypesFileName).getFile()); + resourceTypesFile = new File(source.getParent(), "resource-types.xml"); + FileUtils.copyFile(source, resourceTypesFile); + + CapacitySchedulerConfiguration newConf = + (CapacitySchedulerConfiguration) TestUtils + .getConfigurationWithMultipleQueues(conf); + newConf.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class, ResourceCalculator.class); + //start RM + MockRM rm = new MockRM(newConf); + rm.start(); + + nodeTracker = new ClusterNodeTracker<>(); + MockNodes.resetHostIds(); + Resource nodeResource = Resource.newInstance(4096, 4, + Collections.singletonMap(GPU_URI, 4L)); + List rmNodes = + MockNodes.newNodes(2, 4, nodeResource); + for (RMNode rmNode : rmNodes) { + nodeTracker.addNode(new FiCaSchedulerNode(rmNode, false)); + } + + // Check GPU inc related cluster metrics. + assertEquals("Cluster Capability Memory incorrect", + metrics.getCapabilityMB(), (4096 * 8)); + assertEquals("Cluster Capability Vcores incorrect", + metrics.getCapabilityVirtualCores(), 4 * 8); + assertEquals("Cluster Capability GPUs incorrect", + metrics.getCapabilityGPUs(), 4 * 8); + + for (RMNode rmNode : rmNodes) { + nodeTracker.removeNode(rmNode.getNodeID()); + } + + // Check GPU dec related cluster metrics. + assertEquals("Cluster Capability Memory incorrect", + metrics.getCapabilityMB(), 0); + assertEquals("Cluster Capability Vcores incorrect", + metrics.getCapabilityVirtualCores(), 0); + assertEquals("Cluster Capability GPUs incorrect", + metrics.getCapabilityGPUs(), 0); + ClusterMetrics.destroy(); + } } From 206f6962b98655c7165262bafc460d95e02a5e54 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 18 Mar 2021 11:49:16 +0100 Subject: [PATCH 115/666] YARN-10685. Fix typos in AbstractCSQueue. Contributed by Qi Zhu. 
--- .../scheduler/capacity/AbstractCSQueue.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 2d85d618a7d54..5c0f99a14ae77 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -114,7 +114,8 @@ public abstract class AbstractCSQueue implements CSQueue { private final boolean fullPathQueueNamingPolicy = false; - // Track capacities like used-capcity/abs-used-capacity/capacity/abs-capacity, + // Track capacities like + // used-capacity/abs-used-capacity/capacity/abs-capacity, // etc. QueueCapacities queueCapacities; @@ -962,7 +963,7 @@ private long getInheritedDefaultAppLifetime(CSQueue q, if (defaultAppLifetime < 0) { // If default lifetime was not set at this level but was set somewhere in // the parent's hierarchy, set default lifetime to parent queue's default - // only if parent queue's lifetime is less than current queueu's max + // only if parent queue's lifetime is less than current queue's max // lifetime. Otherwise, use current queue's max lifetime value for its // default lifetime. if (defaultAppLifetimeWasSpecifiedInConfig) { @@ -1012,7 +1013,7 @@ private boolean isIntraQueueHierarchyPreemptionDisabled(CSQueue q, // At this point, the master preemption switch is enabled down to this // queue's level. Determine whether or not intra-queue preemption is enabled - // down to this queu's level and return that value. + // down to this queue's level and return that value. return configuration.getIntraQueuePreemptionDisabled(q.getQueuePath(), parentQ.getIntraQueuePreemptionDisabledInHierarchy()); } @@ -1065,7 +1066,7 @@ boolean canAssignToThisQueue(Resource clusterResource, // Doing this because: for non-exclusive allocation, we make sure there's // idle resource on the partition, to avoid wastage, such resource will be // leveraged as much as we can, and preemption policy will reclaim it back - // when partitoned-resource-request comes back. + // when partitioned-resource-request comes back. Resource currentLimitResource = getCurrentLimitResource(nodePartition, clusterResource, currentResourceLimits, schedulingMode); @@ -1115,7 +1116,7 @@ boolean canAssignToThisQueue(Resource clusterResource, // Can not assign to this queue if (LOG.isDebugEnabled()) { LOG.debug("Failed to assign to queue: " + getQueuePath() - + " nodePatrition: " + nodePartition + + " nodePartition: " + nodePartition + ", usedResources: " + queueUsage.getUsed(nodePartition) + ", clusterResources: " + clusterResource + ", reservedResources: " + resourceCouldBeUnreserved From 0a388fd83898004696e2ce349b494e1baf206a1e Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Thu, 18 Mar 2021 12:43:01 +0100 Subject: [PATCH 116/666] YARN-10659. Improve CS MappingRule %secondary_group evaluation. 
Contributed by Gergely Pollak --- .../placement/CSMappingPlacementRule.java | 31 +++-- .../placement/VariableContext.java | 49 ++++++- .../MappingRuleConditionalVariable.java | 22 ++++ .../MappingRuleConditionalVariables.java | 123 ++++++++++++++++++ .../csmappingrule/MappingRuleResult.java | 2 + .../TestCSMappingPlacementRule.java | 111 +++++++++++++--- ...tCapacitySchedulerQueueMappingFactory.java | 10 +- 7 files changed, 315 insertions(+), 33 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariable.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariables.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java index 04dbb649372dc..07aeb1297c6d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java @@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -184,6 +185,10 @@ private void setupGroupsForVariableContext(VariableContext vctx, String user) LOG.warn( "Group provider hasn't been set, cannot query groups for user {}", user); + //enforcing empty primary group instead of null, which would be considered + //as unknown variable and would evaluate to '%primary_group' + vctx.put("%primary_group", ""); + vctx.put("%secondary_group", ""); return; } Set groupsSet = groups.getGroupsSet(user); @@ -192,24 +197,32 @@ private void setupGroupsForVariableContext(VariableContext vctx, String user) vctx.putExtraDataset("groups", groupsSet); return; } - String secondaryGroup = null; Iterator it = groupsSet.iterator(); String primaryGroup = it.next(); + ArrayList secondaryGroupList = new ArrayList<>(); + while (it.hasNext()) { - String group = it.next(); - if (this.queueManager.getQueue(group) != null) { - secondaryGroup = group; - break; - } + secondaryGroupList.add(it.next()); } - if (secondaryGroup == null && LOG.isDebugEnabled()) { - LOG.debug("User {} is not associated with any Secondary group", user); + if (secondaryGroupList.size() == 0) { + //if we have no chance to have a secondary group to speed up evaluation + //we simply register it as a regular variable with "" as a value + vctx.put("%secondary_group", ""); + if (LOG.isDebugEnabled()) { + LOG.debug("User {} does not have any potential Secondary group", user); + } + } else { + vctx.putConditional( + MappingRuleConditionalVariables.SecondaryGroupVariable.VARIABLE_NAME, + new MappingRuleConditionalVariables.SecondaryGroupVariable( + this.queueManager, + secondaryGroupList + )); } vctx.put("%primary_group", primaryGroup); - 
vctx.put("%secondary_group", secondaryGroup); vctx.putExtraDataset("groups", groupsSet); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java index 0a30e926b3e6c..d60e7b5630ae1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/VariableContext.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule.MappingRuleConditionalVariable; import java.util.Arrays; import java.util.HashMap; @@ -38,6 +39,13 @@ public class VariableContext { * This is our actual variable store. */ private Map variables = new HashMap<>(); + + /** + * This is our conditional variable store. + */ + private Map conditionalVariables = + new HashMap<>(); + /** * This set contains the names of the immutable variables if null it is * ignored. @@ -106,10 +114,32 @@ public VariableContext put(String name, String value) { throw new IllegalStateException( "Variable '" + name + "' is immutable, cannot update it's value!"); } + + if (conditionalVariables.containsKey(name)) { + throw new IllegalStateException( + "Variable '" + name + "' is already defined as a conditional" + + " variable, cannot change it's value!"); + } variables.put(name, value); return this; } + /** + * This method is used to add a conditional variable to the variable context. + * @param name Name of the variable + * @param variable The conditional variable evaluator + * @return VariableContext for daisy chaining + */ + public VariableContext putConditional(String name, + MappingRuleConditionalVariable variable) { + if (conditionalVariables.containsKey(name)) { + throw new IllegalStateException( + "Variable '" + name + "' is conditional, cannot update it's value!"); + } + conditionalVariables.put(name, variable); + return this; + } + /** * Returns the value of a variable, null values are replaced with "". * @param name Name of the variable @@ -213,10 +243,21 @@ public String replacePathVariables(String input) { String[] parts = input.split("\\."); for (int i = 0; i < parts.length; i++) { - //if the part is a variable it should be in the map, otherwise we keep - //it's original value. This means undefined variables will return the - //name of the variable, but this is working as intended. - String newVal = variables.getOrDefault(parts[i], parts[i]); + String newVal = parts[i]; + //if the part is a variable it should be in either the variable or the + //conditional variable map, otherwise we keep it's original value. + //This means undefined variables will return the name of the variable, + //but this is working as intended. 
+ if (variables.containsKey(parts[i])) { + newVal = variables.get(parts[i]); + } else if (conditionalVariables.containsKey(parts[i])) { + MappingRuleConditionalVariable condVariable = + conditionalVariables.get(parts[i]); + if (condVariable != null) { + newVal = condVariable.evaluateInPath(parts, i); + } + } + //if a variable's value is null, we use empty string instead if (newVal == null) { newVal = ""; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariable.java new file mode 100644 index 0000000000000..9efc52f92844b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariable.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; + +public interface MappingRuleConditionalVariable { + String evaluateInPath(String[] parts, int currentIndex); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariables.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariables.java new file mode 100644 index 0000000000000..fcf191e258cad --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleConditionalVariables.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; + +import java.util.List; + +public class MappingRuleConditionalVariables { + /** + * Utility class, hiding constructor. + */ + private MappingRuleConditionalVariables() {} + + /** + * SecondaryGroupVariable represents a conditional variable which is supposed + * to evaluate path parts with "%secondary_group". The evaluation depends on + * if parent path is provided. + * If there was no parent path provided, the %secondary_group variable will be + * equal to the first non-primary group of the user which has a matching queue + * in the queue hierarchy. This means the queue name must be disambiguous as + * well. + * If there is a parent provided (the %secondary_group variable is not the + * first element in the path), the %secondary_group variable will be + * equal to the first non-primary group of the user which has a matching queue + * UNDER the parent path. The parent path must be a full path, to avoid + * ambiguity problems. + */ + public static class SecondaryGroupVariable implements + MappingRuleConditionalVariable { + /** + * This is the name of the variable we are replacing. + */ + public final static String VARIABLE_NAME = "%secondary_group"; + + /** + * We need an instance of queue manager in order to look for queues under + * the parent path. + */ + private CapacitySchedulerQueueManager queueManager; + /** + * We store the potential secondary_groups candidates in this list, it must + * not contain the primary group. + */ + private List potentialGroups; + + /** + * Constructor requires a queue manager instance and a list of potential + * secondary groups. + * @param qm The queue manager which will be used to check which potential + * secondary group should be used. + * @param groups List of potential secondary groups. + */ + public SecondaryGroupVariable(CapacitySchedulerQueueManager qm, + List groups) { + queueManager = qm; + potentialGroups = groups; + } + + /** + * Method used to evaluate the variable when used in a path. + * @param parts Split representation of the path. + * @param currentIndex The index of the evaluation in the path. This shows + * which part is currently being evaluated. + * @return Substituted queue path part, this method will only return the + * value of the conditional variable, not the whole path. + */ + public String evaluateInPath(String[] parts, int currentIndex) { + //First we need to determine the parent path (if any) + StringBuilder parentBuilder = new StringBuilder(); + //Building the parent prefix, if we don't have any parent path + //in case of currentIndex == 0 we will have an empty prefix. + for (int i = 0; i < currentIndex; i++) { + parentBuilder.append(parts[i]); + //Generally this is not a good idea, we would need a condition, to not + //append a '.' after the last part, however we are generating parent + //prefix paths, so we need paths prefixes, like 'root.group.something.' 
+ parentBuilder.append("."); + } + + //We'll use this prefix to lookup the groups, when we have a parent + //provided we need to find a queue under that parent, which matches the + //name of the secondaryGroup, if we don't have a parent the prefix is + //empty + String lookupPrefix = parentBuilder.toString(); + + //Going through the potential groups to check if there is a matching queue + for (String group : potentialGroups) { + String path = lookupPrefix + group; + if (queueManager.getQueue(path) != null) { + return group; + } + } + + //No valid group found + return ""; + } + + @Override + public String toString() { + return "SecondaryGroupVariable{" + + "variableName='" + VARIABLE_NAME + "'," + + "groups=" + potentialGroups + + "}"; + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java index ed61e64ac637a..85f398166a2f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleResult.java @@ -153,6 +153,8 @@ public MappingRuleResultType getResult() { /** * Generator method for place results. * @param queue The name of the queue in which we shall place the application + * @param allowCreate Flag to indicate if the placement rule is allowed to + * create a queue if possible. 
* @return The generated MappingRuleResult */ public static MappingRuleResult createPlacementResult( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java index c3cd6dfdc6883..f6bb2a1587836 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java @@ -64,7 +64,7 @@ public class TestCSMappingPlacementRule { public TemporaryFolder folder = new TemporaryFolder(); private Map> userGroups = ImmutableMap.of( - "alice", ImmutableSet.of("p_alice", "user", "developer"), + "alice", ImmutableSet.of("p_alice", "unique", "user"), "bob", ImmutableSet.of("p_bob", "user", "developer"), "charlie", ImmutableSet.of("p_charlie", "user", "tester"), "dave", ImmutableSet.of("user"), @@ -79,6 +79,8 @@ private void createQueueHierarchy(CapacitySchedulerQueueManager queueManager) { .withManagedParentQueue("root.man") .withQueue("root.user.alice") .withQueue("root.user.bob") + .withQueue("root.secondaryTests.unique") + .withQueue("root.secondaryTests.user") .withQueue("root.ambiguous.user.charlie") .withQueue("root.ambiguous.user.dave") .withQueue("root.ambiguous.user.ambi") @@ -91,13 +93,12 @@ private void createQueueHierarchy(CapacitySchedulerQueueManager queueManager) { .withManagedParentQueue("root.ambiguous.deep.managed") .withQueue("root.disambiguous.deep.disambiuser.emily") .withQueue("root.disambiguous.deep.disambiuser.disambi") - .withQueue("root.disambiguous.deep.group.developer") + .withManagedParentQueue("root.disambiguous.deep.group.developer") .withManagedParentQueue("root.disambiguous.deep.dman") .withDynamicParentQueue("root.dynamic") .build(); when(queueManager.getQueue(isNull())).thenReturn(null); - when(queueManager.isAmbiguous("primarygrouponly")).thenReturn(true); } private CSMappingPlacementRule setupEngine( @@ -479,14 +480,6 @@ public void testSpecified() throws IOException { "queue 'root.user.bob'", engine, appBob, "alice", "root.user.bob"); } - private MappingRule createGroupMapping(String group, String queue) { - MappingRuleMatcher matcher = MappingRuleMatchers.createUserGroupMatcher(group); - MappingRuleAction action = - (new MappingRuleActions.PlaceToQueueAction(queue, true)) - .setFallbackReject(); - return new MappingRule(matcher, action); - } - @Test public void testGroupTargetMatching() throws IOException { ArrayList rules = new ArrayList<>(); @@ -502,7 +495,7 @@ public void testGroupTargetMatching() throws IOException { new MappingRule( MappingRuleMatchers.createUserMatcher("bob"), (new MappingRuleActions.PlaceToQueueAction( - "root.dynamic.%secondary_group.%user", true)) + "root.disambiguous.deep.group.%secondary_group.%user", true)) .setFallbackReject())); rules.add( @@ -526,10 +519,11 @@ public void testGroupTargetMatching() throws IOException { "Alice should be placed to root.man.p_alice based on her primary group", engine, app, "alice", "root.man.p_alice"); 
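    // Bob's groups are p_bob (primary), user and developer; with the parent
    // path root.disambiguous.deep.group only 'developer' has a matching queue
    // (now a managed parent in this hierarchy), so %secondary_group resolves
    // to 'developer' and %user creates 'bob' under it, as asserted next.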
assertPlace( - "Bob should be placed to root.dynamic.developer.bob based on his " + - "secondary group, since we have a queue named 'developer', bob " + + "Bob should be placed to root.disambiguous.deep.group.developer.bob" + + "based on his secondary group, since we have a queue named" + + "'developer', under the path 'root.disambiguous.deep.group' bob " + "identifies as a user with secondary_group 'developer'", engine, app, - "bob", "root.dynamic.developer.bob"); + "bob", "root.disambiguous.deep.group.developer.bob"); assertReject("Charlie should get rejected because he neither of his" + "groups have an ambiguous queue, so effectively he has no secondary " + "group", engine, app, "charlie"); @@ -537,6 +531,93 @@ public void testGroupTargetMatching() throws IOException { engine, app, "dave"); } + @Test + public void testSecondaryGroupWithoutParent() throws IOException { + ArrayList rules = new ArrayList<>(); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("alice"), + (new MappingRuleActions.PlaceToQueueAction( + "%secondary_group", false)) + .setFallbackReject())); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("bob"), + (new MappingRuleActions.PlaceToQueueAction( + "%secondary_group.%user", true)) + .setFallbackReject())); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("charlie"), + (new MappingRuleActions.PlaceToQueueAction( + "%secondary_group", true)) + .setFallbackReject())); + CSMappingPlacementRule engine = setupEngine(true, rules); + ApplicationSubmissionContext app = createApp("app"); + + assertPlace( + "Alice should be placed to root.secondaryTests.unique because " + + "'unique' is a globally unique queue, and she has a matching group", + engine, app, "alice", "root.secondaryTests.unique"); + assertPlace( + "Bob should be placed to root.disambiguous.deep.group.developer.bob " + + "because 'developer' is a globally unique PARENT queue, and he " + + "has a matching group name, and can create a queue with '%user' " + + "under it", engine, app, "bob", + "root.disambiguous.deep.group.developer.bob"); + assertReject("Charlie should get rejected because neither of his" + + "groups have a disambiguous queue, so effectively he has no " + + "secondary group", engine, app, "charlie"); + } + + + @Test + public void testSecondaryGroupWithParent() throws IOException { + ArrayList rules = new ArrayList<>(); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("alice"), + (new MappingRuleActions.PlaceToQueueAction( + "root.secondaryTests.%secondary_group", false)) + .setFallbackReject())); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("bob"), + (new MappingRuleActions.PlaceToQueueAction( + "root.secondaryTests.%secondary_group", true)) + .setFallbackReject())); + + rules.add( + new MappingRule( + MappingRuleMatchers.createUserMatcher("charlie"), + (new MappingRuleActions.PlaceToQueueAction( + "root.%secondary_group", true)) + .setFallbackReject())); + CSMappingPlacementRule engine = setupEngine(true, rules); + ApplicationSubmissionContext app = createApp("app"); + + assertPlace( + "Alice should be placed to root.secondaryTests.unique because " + + "both her secondary groups 'user' and 'unique' are eligible " + + "for being a secondary group under root.secondaryTests, but " + + "'unique' precedes 'user' in the group list.", + engine, app, "alice", "root.secondaryTests.unique"); + assertPlace( + "Bob should be placed to root.secondaryTests.user " + + "bob is member of 
group 'user' and while 'user' is globally not " + + "unique it is a valid secondary group target under queue " + + "root.secondaryTests.", + engine, app, "bob", "root.secondaryTests.user"); + assertReject("Charlie should get rejected because neither of his" + + "groups have a matching queue under root.", engine, app, "charlie"); + } + + void assertConfigTestResult(List rules) { assertEquals("We only specified one rule", 1, rules.size()); MappingRule rule = rules.get(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java index 6a478a06e49b7..6f9ca5c354085 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java @@ -472,8 +472,8 @@ public void testFixedUserWithDynamicGroupQueue() throws Exception { // u:b4:c.%secondary_group QueueMapping userQueueMapping3 = QueueMappingBuilder.create() .type(QueueMapping.MappingType.USER) - .source("e") - .queue("c.%secondary_group") + .source("b4") + .queue("root.b.%secondary_group") .build(); queueMappingsForUG.add(userQueueMapping1); @@ -508,9 +508,9 @@ public void testFixedUserWithDynamicGroupQueue() throws Exception { ApplicationPlacementContext ctx1 = r.getPlacementForApp(asc, "a1"); assertEquals("Queue", "a1group", ctx1.getQueue()); - ApplicationPlacementContext ctx2 = r.getPlacementForApp(asc, "e"); - assertEquals("Queue", "esubgroup1", ctx2.getQueue()); - assertEquals("Queue", "root.c", ctx2.getParentQueue()); + ApplicationPlacementContext ctx2 = r.getPlacementForApp(asc, "b4"); + assertEquals("Queue", "b4subgroup1", ctx2.getQueue()); + assertEquals("Queue", "root.b", ctx2.getParentQueue()); } finally { if (mockRM != null) { mockRM.close(); From 10495aa4df261338728bfceed909a25c1a003dd6 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 18 Mar 2021 12:45:28 +0100 Subject: [PATCH 117/666] YARN-10692. Add Node GPU Utilization and apply to NodeMetrics. Contributed by Qi Zhu. 
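The node-level figure added by this patch is the mean of the per-device overall utilization values returned by the GPU discoverer, published through NodeManagerMetrics next to the existing CPU and memory gauges. A minimal sketch of that averaging step, with a plain list of floats standing in for the PerGpuDeviceInformation objects that GpuNodeResourceUpdateHandler.getNodeGpuUtilization() actually iterates:

    import java.util.List;

    final class GpuUtilizationSketch {
      // Not the NodeManager code itself, only the arithmetic it performs.
      static float averageUtilization(List<Float> perGpuUtilization) {
        if (perGpuUtilization == null || perGpuUtilization.isEmpty()) {
          return 0F;                           // no GPUs discovered -> report 0
        }
        float sum = 0F;
        for (float u : perGpuUtilization) {
          sum += u;                            // overall utilization of one device
        }
        // Two devices at 0.4F and 0.6F average to 0.5F, which is what the new
        // unit test below asserts.
        return sum / perGpuUtilization.size();
      }
    }
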
--- .../nodemanager/NodeResourceMonitorImpl.java | 28 ++++++++++++ .../gpu/GpuNodeResourceUpdateHandler.java | 18 ++++++++ .../metrics/NodeManagerMetrics.java | 10 +++++ .../TestContainerManagerRecovery.java | 9 ++-- .../gpu/TestGpuResourcePlugin.java | 43 +++++++++++++++++++ .../metrics/TestNodeManagerMetrics.java | 9 +++- 6 files changed, 112 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index c39e52ede73b3..098e7c76da7c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -20,8 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuNodeResourceUpdateHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuResourcePlugin; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.slf4j.Logger; @@ -46,6 +49,10 @@ public class NodeResourceMonitorImpl extends AbstractService implements /** Resource calculator. */ private ResourceCalculatorPlugin resourceCalculatorPlugin; + /** Gpu related plugin. */ + private GpuResourcePlugin gpuResourcePlugin; + private GpuNodeResourceUpdateHandler gpuNodeResourceUpdateHandler; + /** Current resource utilization of the node. */ private ResourceUtilization nodeUtilization = ResourceUtilization.newInstance(0, 0, 0f); @@ -72,6 +79,18 @@ protected void serviceInit(Configuration conf) throws Exception { this.resourceCalculatorPlugin = ResourceCalculatorPlugin.getNodeResourceMonitorPlugin(conf); + if (nmContext.getResourcePluginManager() != null) { + this.gpuResourcePlugin = + (GpuResourcePlugin)nmContext.getResourcePluginManager(). + getNameToPlugins().get(ResourceInformation.GPU_URI); + + if (gpuResourcePlugin != null) { + this.gpuNodeResourceUpdateHandler = + (GpuNodeResourceUpdateHandler)gpuResourcePlugin. + getNodeResourceHandlerInstance(); + } + } + LOG.info(" Using ResourceCalculatorPlugin : " + this.resourceCalculatorPlugin); } @@ -152,6 +171,14 @@ public void run() { (int) (vmem >> 20), // B -> MB vcores); // Used Virtual Cores + float nodeGpuUtilization = 0F; + try { + nodeGpuUtilization = + gpuNodeResourceUpdateHandler.getNodeGpuUtilization(); + } catch (Exception e) { + LOG.error("Get Node GPU Utilization error: " + e); + } + // Publish the node utilization metrics to node manager // metrics system. 
NodeManagerMetrics nmMetrics = nmContext.getNodeManagerMetrics(); @@ -159,6 +186,7 @@ public void run() { nmMetrics.setNodeUsedMemGB(nodeUtilization.getPhysicalMemory()); nmMetrics.setNodeUsedVMemGB(nodeUtilization.getVirtualMemory()); nmMetrics.setNodeCpuUtilization(nodeUtilization.getCPU()); + nmMetrics.setNodeGpuUtilization(nodeGpuUtilization); } try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java index afb0d7eda23e5..af81709566d18 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java @@ -26,12 +26,14 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.NodeResourceUpdaterPlugin; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuDeviceInformation; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI; @@ -76,4 +78,20 @@ public void updateConfiguredResource(Resource res) throws YarnException { res.setResourceValue(GPU_URI, nUsableGpus); } + + public float getNodeGpuUtilization() throws Exception{ + List gpuList = + gpuDiscoverer.getGpuDeviceInformation().getGpus(); + Float totalGpuUtilization = 0F; + if (gpuList != null && + gpuList.size() != 0) { + + totalGpuUtilization = gpuList + .stream() + .map(g -> g.getGpuUtilizations().getOverallGpuUtilization()) + .collect(Collectors.summingDouble(Float::floatValue)) + .floatValue() / gpuList.size(); + } + return totalGpuUtilization; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java index abe452981680b..848b9445289a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java @@ -98,6 +98,8 @@ public class NodeManagerMetrics { MutableGaugeInt nodeUsedVMemGB; @Metric("Current CPU utilization") MutableGaugeFloat nodeCpuUtilization; + @Metric("Current GPU utilization") + MutableGaugeFloat nodeGpuUtilization; @Metric("Missed localization requests in bytes") MutableCounterLong localizedCacheMissBytes; @@ -428,6 +430,14 @@ 
public void setNodeCpuUtilization(float cpuUtilization) { this.nodeCpuUtilization.set(cpuUtilization); } + public void setNodeGpuUtilization(float nodeGpuUtilization) { + this.nodeGpuUtilization.set(nodeGpuUtilization); + } + + public float getNodeGpuUtilization() { + return nodeGpuUtilization.value(); + } + private void updateLocalizationHitRatios() { updateLocalizationHitRatio(localizedCacheHitBytes, localizedCacheMissBytes, localizedCacheHitBytesRatio); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index 826cc02219bc2..c67ae86f95ef2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -437,14 +437,16 @@ public void testNodeManagerMetricsRecovery() throws Exception { waitForNMContainerState(cm, cid, org.apache.hadoop.yarn.server.nodemanager .containermanager.container.ContainerState.RUNNING); - TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, 1, 1, 1, 9, 1, 7); + TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, + 1, 1, 1, 9, 1, 7, 0F); // restart and verify metrics could be recovered cm.stop(); DefaultMetricsSystem.shutdown(); metrics = NodeManagerMetrics.create(); metrics.addResource(Resource.newInstance(10240, 8)); - TestNodeManagerMetrics.checkMetrics(0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8); + TestNodeManagerMetrics.checkMetrics(0, 0, 0, 0, 0, 0, + 0, 0, 10, 0, 8, 0F); context = createContext(conf, stateStore); cm = createContainerManager(context, delSrvc); cm.init(conf); @@ -452,7 +454,8 @@ public void testNodeManagerMetricsRecovery() throws Exception { assertEquals(1, context.getApplications().size()); app = context.getApplications().get(appId); assertNotNull(app); - TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, 1, 1, 1, 9, 1, 7); + TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, + 1, 1, 1, 9, 1, 7, 0F); cm.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java index 10e6f5afa9511..749e0cc14de91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java @@ -21,11 +21,13 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import 
org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformation; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.NMGpuResourceInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuDeviceInformation; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuUtilizations; import org.junit.Assert; import org.junit.Test; import java.util.List; @@ -122,4 +124,45 @@ public void testGetNMResourceInfoAutoDiscoveryDisabled() (NMGpuResourceInfo) target.getNMResourceInfo(); Assert.assertNull(resourceInfo.getGpuDeviceInformation()); } + + @Test + public void testNodeGPUUtilization() + throws Exception { + GpuDiscoverer gpuDiscoverer = createNodeGPUUtilizationDiscoverer(); + + GpuNodeResourceUpdateHandler gpuNodeResourceUpdateHandler = + new GpuNodeResourceUpdateHandler(gpuDiscoverer, new Configuration()); + + Assert.assertEquals(0.5F, + gpuNodeResourceUpdateHandler.getNodeGpuUtilization(), 1e-6); + } + + private GpuDiscoverer createNodeGPUUtilizationDiscoverer() + throws YarnException { + GpuDiscoverer gpuDiscoverer = mock(GpuDiscoverer.class); + + PerGpuDeviceInformation gpu1 = + new PerGpuDeviceInformation(); + PerGpuUtilizations perGpuUtilizations1 = + new PerGpuUtilizations(); + perGpuUtilizations1.setOverallGpuUtilization(0.4F); + + gpu1.setGpuUtilizations(perGpuUtilizations1); + + PerGpuDeviceInformation gpu2 = + new PerGpuDeviceInformation(); + PerGpuUtilizations perGpuUtilizations2 = + new PerGpuUtilizations(); + perGpuUtilizations2.setOverallGpuUtilization(0.6F); + gpu2.setGpuUtilizations(perGpuUtilizations2); + + List gpus = Lists.newArrayList(); + gpus.add(gpu1); + gpus.add(gpu2); + + GpuDeviceInformation gpuDeviceInfo = new GpuDeviceInformation(); + gpuDeviceInfo.setGpus(gpus); + when(gpuDiscoverer.getGpuDeviceInformation()).thenReturn(gpuDeviceInfo); + return gpuDiscoverer; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java index c5f80ba958a52..37454747c9219 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java @@ -100,11 +100,15 @@ public void testReferenceOfSingletonJvmMetrics() { metrics.addContainerLaunchDuration(1); Assert.assertTrue(metrics.containerLaunchDuration.changed()); + // Set node gpu utilization + metrics.setNodeGpuUtilization(35.5F); + // availableGB is expected to be floored, // while allocatedGB is expected to be ceiled. 
// allocatedGB: 3.75GB allocated memory is shown as 4GB // availableGB: 4.25GB available memory is shown as 4GB - checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 13, 3); + checkMetrics(10, 1, 1, 1, 1, + 1, 4, 7, 4, 13, 3, 35.5F); // Update resource and check available resource again metrics.addResource(total); @@ -116,7 +120,7 @@ public void testReferenceOfSingletonJvmMetrics() { public static void checkMetrics(int launched, int completed, int failed, int killed, int initing, int running, int allocatedGB, int allocatedContainers, int availableGB, int allocatedVCores, - int availableVCores) { + int availableVCores, Float nodeGpuUtilization) { MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics"); assertCounter("ContainersLaunched", launched, rb); assertCounter("ContainersCompleted", completed, rb); @@ -129,6 +133,7 @@ public static void checkMetrics(int launched, int completed, int failed, assertGauge("AllocatedContainers", allocatedContainers, rb); assertGauge("AvailableGB", availableGB, rb); assertGauge("AvailableVCores",availableVCores, rb); + assertGauge("NodeGpuUtilization", nodeGpuUtilization, rb); } } From 2fb1a81ef5b8405ab63900d45407cf090b219b9f Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 18 Mar 2021 13:40:16 +0100 Subject: [PATCH 118/666] YARN-10641. Refactor the max app related update, and fix maxApllications update error when add new queues. Contributed by Qi Zhu. --- .../scheduler/capacity/AbstractCSQueue.java | 81 ++++++++++--------- .../CapacitySchedulerConfiguration.java | 7 ++ .../scheduler/capacity/LeafQueue.java | 32 +++----- ...CapacitySchedulerNewQueueAutoCreation.java | 57 +++++++++++++ 4 files changed, 118 insertions(+), 59 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 5c0f99a14ae77..06575be4c7a3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -1489,6 +1489,44 @@ private Resource getMinResourceNormalized(String name, return ret; } + void updateMaxAppRelatedField(CapacitySchedulerConfiguration conf, + LeafQueue leafQueue, String label) { + int maxApplications = conf.getMaximumApplicationsPerQueue(queuePath); + if (maxApplications < 0) { + int maxGlobalPerQueueApps = conf.getGlobalMaximumApplicationsPerQueue(); + if (maxGlobalPerQueueApps > 0) { + // In absolute mode, should + // shrink when change to corresponding label capacity. + maxApplications = this.capacityConfigType + != CapacityConfigType.ABSOLUTE_RESOURCE ? 
+ maxGlobalPerQueueApps : + (int) (maxGlobalPerQueueApps * queueCapacities + .getAbsoluteCapacity(label)); + } else{ + maxApplications = (int) (conf.getMaximumSystemApplications() + * queueCapacities.getAbsoluteCapacity(label)); + } + } + leafQueue.setMaxApplications(maxApplications); + + int maxApplicationsPerUser = Math.min(maxApplications, + (int) (maxApplications + * (leafQueue.getUsersManager().getUserLimit() / 100.0f) + * leafQueue.getUsersManager().getUserLimitFactor())); + if (leafQueue.getUsersManager().getUserLimitFactor() == -1) { + maxApplicationsPerUser = maxApplications; + } + + leafQueue.setMaxApplicationsPerUser(maxApplicationsPerUser); + LOG.info("LeafQueue:" + leafQueue.getQueuePath() + + "update max app related, maxApplications=" + + maxApplications + ", maxApplicationsPerUser=" + + maxApplicationsPerUser + ", Abs Cap:" + queueCapacities + .getAbsoluteCapacity(label) + ", Cap: " + queueCapacities + .getCapacity(label) + ", MaxCap : " + queueCapacities + .getMaximumCapacity(label)); + } + private void deriveCapacityFromAbsoluteConfigurations(String label, Resource clusterResource, ResourceCalculator rc) { @@ -1522,42 +1560,6 @@ private void deriveCapacityFromAbsoluteConfigurations(String label, queueCapacities.setAbsoluteMaximumCapacity(label, queueCapacities.getMaximumCapacity(label) * parent.getQueueCapacities() .getAbsoluteMaximumCapacity(label)); - - // Re-visit max applications for a queue based on absolute capacity if - // needed. - if (this instanceof LeafQueue) { - LeafQueue leafQueue = (LeafQueue) this; - CapacitySchedulerConfiguration conf = csContext.getConfiguration(); - int maxApplications = conf.getMaximumApplicationsPerQueue(queuePath); - if (maxApplications < 0) { - int maxGlobalPerQueueApps = conf.getGlobalMaximumApplicationsPerQueue(); - if (maxGlobalPerQueueApps > 0) { - maxApplications = (int) (maxGlobalPerQueueApps * queueCapacities - .getAbsoluteCapacity(label)); - } else{ - maxApplications = - (int) (conf.getMaximumSystemApplications() * queueCapacities - .getAbsoluteCapacity(label)); - } - } - leafQueue.setMaxApplications(maxApplications); - - int maxApplicationsPerUser = Math.min(maxApplications, - (int) (maxApplications - * (leafQueue.getUsersManager().getUserLimit() / 100.0f) - * leafQueue.getUsersManager().getUserLimitFactor())); - if (leafQueue.getUsersManager().getUserLimitFactor() == -1) { - maxApplicationsPerUser = maxApplications; - } - - leafQueue.setMaxApplicationsPerUser(maxApplicationsPerUser); - LOG.info("LeafQueue:" + leafQueue.getQueuePath() + ", maxApplications=" - + maxApplications + ", maxApplicationsPerUser=" - + maxApplicationsPerUser + ", Abs Cap:" + queueCapacities - .getAbsoluteCapacity(label) + ", Cap: " + queueCapacities - .getCapacity(label) + ", MaxCap : " + queueCapacities - .getMaximumCapacity(label)); - } } void updateEffectiveResources(Resource clusterResource) { @@ -1603,6 +1605,13 @@ void updateEffectiveResources(Resource clusterResource) { // percentage, we have to calculate percentage and update. ResourceCalculator rc = this.csContext.getResourceCalculator(); deriveCapacityFromAbsoluteConfigurations(label, clusterResource, rc); + // Re-visit max applications for a queue based on absolute capacity if + // needed. 
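          // Worked example (hypothetical numbers): with
          // maximum-system-applications = 10000 and an absolute capacity of
          // 0.25 for the label, maxApplications becomes 2500; with a user
          // limit of 100% and user-limit-factor 2, maxApplicationsPerUser is
          // min(2500, 2500 * 1.0 * 2) = 2500, while a factor of -1 removes the
          // per-user cap (maxApplicationsPerUser = maxApplications).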
+ if (this instanceof LeafQueue) { + LeafQueue leafQueue = (LeafQueue) this; + CapacitySchedulerConfiguration conf = csContext.getConfiguration(); + updateMaxAppRelatedField(conf, leafQueue, label); + } } else{ queueResourceQuotas.setEffectiveMinResource(label, Resources .multiply(resourceByLabel, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index efd56a2491966..5bae2b375f1a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -477,6 +477,13 @@ public int getMaximumApplicationsPerQueue(String queue) { return maxApplicationsPerQueue; } + @VisibleForTesting + public void setMaximumApplicationsPerQueue(String queue, + int numMaxApps) { + setInt(getQueuePrefix(queue) + MAXIMUM_APPLICATIONS_SUFFIX, + numMaxApps); + } + /** * Get the maximum am resource percent per queue setting. * @param queue name of the queue diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 71e65cb4367c7..dce3115b82f1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -87,6 +87,7 @@ public class LeafQueue extends AbstractCSQueue { private float absoluteUsedCapacity = 0.0f; + // TODO the max applications should consider label protected int maxApplications; protected volatile int maxApplicationsPerUser; @@ -1915,28 +1916,6 @@ private void updateCurrentResourceLimits( currentResourceLimits.getLimit())); } - private void updateAbsoluteCapacitiesAndRelatedFields() { - updateAbsoluteCapacities(); - CapacitySchedulerConfiguration schedulerConf = csContext.getConfiguration(); - - // If maxApplications not set, use the system total max app, apply newly - // calculated abs capacity of the queue. - if (maxApplications <= 0) { - int maxSystemApps = schedulerConf. 
- getMaximumSystemApplications(); - maxApplications = - (int) (maxSystemApps * queueCapacities.getAbsoluteCapacity()); - } - maxApplicationsPerUser = - Math.min(maxApplications, - (int) (maxApplications * (usersManager.getUserLimit() / 100.0f) - * usersManager.getUserLimitFactor())); - - if (getUserLimitFactor() == -1) { - maxApplicationsPerUser = maxApplications; - } - } - @Override public void updateClusterResource(Resource clusterResource, ResourceLimits currentResourceLimits) { @@ -1944,7 +1923,14 @@ public void updateClusterResource(Resource clusterResource, try { lastClusterResource = clusterResource; - updateAbsoluteCapacitiesAndRelatedFields(); + updateAbsoluteCapacities(); + + // If maxApplications not set, use the system total max app, apply newly + // calculated abs capacity of the queue. + // When add new queue, the parent queue's other children should also + // update the max app. + super.updateMaxAppRelatedField(csContext.getConfiguration(), + this, CommonNodeLabelsManager.NO_LABEL); super.updateEffectiveResources(clusterResource); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index f493fe6078af9..c403d23f07b98 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -549,6 +549,63 @@ public void testAutoCreateQueueUserLimitDisabled() throws Exception { user0LeafQueue.getMinimumAllocation()).getMemorySize(), 1e-6); } + @Test + public void testAutoQueueCreationMaxAppUpdate() throws Exception { + startScheduler(); + + // When no conf for max apps + LeafQueue a1 = (LeafQueue)cs. + getQueue("root.a.a1"); + Assert.assertNotNull(a1); + Assert.assertEquals(csConf.getMaximumSystemApplications() + * a1.getAbsoluteCapacity(), a1.getMaxApplications(), 1); + + LeafQueue b = (LeafQueue)cs. + getQueue("root.b"); + Assert.assertNotNull(b); + Assert.assertEquals(csConf.getMaximumSystemApplications() + * b.getAbsoluteCapacity(), b.getMaxApplications(), 1); + + createQueue("root.e"); + + // Make sure other children queues + // max app correct. + LeafQueue e = (LeafQueue)cs. + getQueue("root.e"); + Assert.assertNotNull(e); + Assert.assertEquals(csConf.getMaximumSystemApplications() + * e.getAbsoluteCapacity(), e.getMaxApplications(), 1); + + a1 = (LeafQueue)cs. + getQueue("root.a.a1"); + Assert.assertNotNull(a1); + Assert.assertEquals(csConf.getMaximumSystemApplications() + * a1.getAbsoluteCapacity(), a1.getMaxApplications(), 1); + + b = (LeafQueue)cs. 
+ getQueue("root.b"); + Assert.assertNotNull(b); + Assert.assertEquals(csConf.getMaximumSystemApplications() + * b.getAbsoluteCapacity(), b.getMaxApplications(), 1); + + // When update global max app per queue + csConf.setGlobalMaximumApplicationsPerQueue(1000); + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals(1000, b.getMaxApplications()); + Assert.assertEquals(1000, a1.getMaxApplications()); + Assert.assertEquals(1000, e.getMaxApplications()); + + // when set some queue for max apps + csConf.setMaximumApplicationsPerQueue("root.e1", 50); + createQueue("root.e1"); + LeafQueue e1 = (LeafQueue)cs. + getQueue("root.e1"); + Assert.assertNotNull(e1); + + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals(50, e1.getMaxApplications()); + } + @Test public void testAutoCreateQueueIfAmbiguousQueueNames() throws Exception { startScheduler(); From 623a142acc3d27625e1a36966f4b305c92754c20 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Thu, 18 Mar 2021 17:45:09 +0000 Subject: [PATCH 119/666] YARN-10703. Fix potential null pointer error of gpuNodeResourceUpdateHandler in NodeResourceMonitorImpl. Contributed by Qi Zhu. --- .../yarn/server/nodemanager/NodeResourceMonitorImpl.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index 098e7c76da7c2..7577b55899781 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -173,8 +173,10 @@ public void run() { float nodeGpuUtilization = 0F; try { - nodeGpuUtilization = - gpuNodeResourceUpdateHandler.getNodeGpuUtilization(); + if (gpuNodeResourceUpdateHandler != null) { + nodeGpuUtilization = + gpuNodeResourceUpdateHandler.getNodeGpuUtilization(); + } } catch (Exception e) { LOG.error("Get Node GPU Utilization error: " + e); } From 98496a4421eecb16c7bb54eb845e5329b48f0c79 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Thu, 18 Mar 2021 19:36:50 +0000 Subject: [PATCH 120/666] YARN-10701. The yarn.resource-types should support multi types without trimmed. Contributed by Qi Zhu. 
--- .../org/apache/hadoop/yarn/util/resource/ResourceUtils.java | 3 ++- .../src/test/resources/resource-types-test.xml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java index f7e7f35a89e27..f7c75a6079ffb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -251,7 +251,8 @@ public static Resource fetchMaximumAllocationFromConfig(Configuration conf) { private static Map getResourceInformationMapFromConfig( Configuration conf) { Map resourceInformationMap = new HashMap<>(); - String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES); + String[] resourceNames = + conf.getTrimmedStrings(YarnConfiguration.RESOURCE_TYPES); if (resourceNames != null && resourceNames.length != 0) { for (String resourceName : resourceNames) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml index e68df3e10a953..be4f07428c521 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml @@ -17,6 +17,6 @@ yarn.resource-types - yarn.io/gpu + yarn.io/gpu, yarn.io/fpga \ No newline at end of file From b9cfa0a3f1fa38fe18453bd4c7ef643e55bf8410 Mon Sep 17 00:00:00 2001 From: Narges Shadab <54193416+Nargeshdb@users.noreply.github.com> Date: Fri, 19 Mar 2021 01:36:48 -0700 Subject: [PATCH 121/666] HDFS-15868. Possible Resource Leak in EditLogFileOutputStream (#2736). Contributed by Narges Shadab. Reviewed-by: He Xiaoqiao --- .../hdfs/server/namenode/EditLogFileOutputStream.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index 493a542d1b2dd..96b732be7feed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -88,7 +88,12 @@ public EditLogFileOutputStream(Configuration conf, File name, int size) } else { rp = new RandomAccessFile(name, "rws"); } - fp = new FileOutputStream(rp.getFD()); // open for append + try { + fp = new FileOutputStream(rp.getFD()); // open for append + } catch (IOException e) { + IOUtils.closeStream(rp); + throw e; + } fc = rp.getChannel(); fc.position(fc.size()); } From 6a6fed26f35f7d143d10bc7ccd29a0704fd76e4b Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 19 Mar 2021 14:19:49 +0530 Subject: [PATCH 122/666] HADOOP-17594. DistCp: Expose the JobId for applications executing through run method (#2786). Contributed by Ayush Saxena. 
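Because the job id is now also written into the Tool's own Configuration, callers that drive DistCp through run() rather than createAndSubmitJob() can read it back afterwards. A rough sketch with placeholder paths, assuming the two-argument constructor with null options (the shape the CLI entry point uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.tools.DistCpConstants;

    static String runAndGetJobId(String src, String dst) throws Exception {
      Configuration conf = new Configuration();
      DistCp distCp = new DistCp(conf, null);     // options are parsed from args in run()
      int rc = distCp.run(new String[] {src, dst});
      if (rc != DistCpConstants.SUCCESS) {
        return null;                              // copy failed, nothing to report
      }
      return distCp.getConf().get(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID);
    }
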
Signed-off-by: Mingliang Liu Signed-off-by: Steve Loughran --- .../main/java/org/apache/hadoop/tools/DistCp.java | 2 ++ .../tools/contract/AbstractContractDistCpTest.java | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java index ca64cab50998d..24d1ed0e63dcc 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java @@ -225,6 +225,8 @@ public Job createAndSubmitJob() throws Exception { String jobID = job.getJobID().toString(); job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); + // Set the jobId for the applications running through run method. + getConf().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); LOG.info("DistCp job-id: " + jobID); return job; diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index eeaf30a929996..1a40d78b2651a 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.tools.contract; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; +import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_DISTCP_JOB_ID; import java.io.IOException; import java.util.Collections; @@ -42,6 +43,7 @@ import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.mapred.CopyMapper; +import org.apache.hadoop.tools.util.DistCpTestUtils; import org.junit.Before; import org.junit.Rule; @@ -464,6 +466,17 @@ public void testLargeFilesFromRemote() throws Exception { largeFiles(remoteFS, remoteDir, localFS, localDir); } + @Test + public void testSetJobId() throws Exception { + describe("check jobId is set in the conf"); + remoteFS.create(new Path(remoteDir, "file1")).close(); + DistCpTestUtils + .assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), + localDir.toString(), null, conf); + assertNotNull("DistCp job id isn't set", + conf.get(CONF_LABEL_DISTCP_JOB_ID)); + } + /** * Executes a DistCp using a file system sub-tree with multiple nesting * levels. From 7ce6f1cec9c1ddb1a6c40f8463286553e4392c1b Mon Sep 17 00:00:00 2001 From: Cyrus Jackson Date: Fri, 19 Mar 2021 15:49:05 +0530 Subject: [PATCH 123/666] YARN-10476. Queue metrics for Unmanaged applications (#2674). 
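The unmanaged-AM flag travels from SchedulerApplication and AppSchedulingInfo into QueueMetrics, which now keeps an Unmanaged* counterpart for each application counter and gauge. A compact sketch of the new calls; the metrics system, queue name and user below are made up for the example:

    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;

    QueueMetrics metrics = QueueMetrics.forQueue(DefaultMetricsSystem.instance(),
        "root.sandbox", null, false, new YarnConfiguration());

    metrics.submitApp("alice", true);         // AppsSubmitted and UnmanagedAppsSubmitted
    metrics.submitAppAttempt("alice", true);  // AppsPending and UnmanagedAppsPending
    // runAppAttempt, finishAppAttempt and finishApp take the same boolean, so
    // the Unmanaged* metrics follow the managed ones through the lifecycle.
    int unmanagedSubmitted = metrics.getUnmanagedAppsSubmitted();   // 1
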
Contributed by Cyrus Jackson --- .../scheduler/AppSchedulingInfo.java | 26 +++- .../scheduler/QueueMetrics.java | 137 ++++++++++++++--- .../scheduler/SchedulerApplication.java | 13 +- .../SchedulerApplicationAttempt.java | 2 +- .../scheduler/capacity/CapacityScheduler.java | 22 ++- .../scheduler/capacity/LeafQueue.java | 4 +- .../event/AppAddedSchedulerEvent.java | 7 + .../scheduler/fair/FairScheduler.java | 10 +- .../scheduler/fifo/FifoScheduler.java | 15 +- .../TestWorkPreservingRMRestart.java | 21 ++- .../scheduler/AppMetricsChecker.java | 22 ++- .../scheduler/TestAbstractYarnScheduler.java | 3 +- .../scheduler/TestAppSchedulingInfo.java | 4 +- .../scheduler/TestPartitionQueueMetrics.java | 35 ++--- .../scheduler/TestQueueMetrics.java | 139 ++++++++++++++---- .../TestSchedulerApplicationAttempt.java | 3 +- .../scheduler/capacity/TestLeafQueue.java | 93 +++++++++++- .../scheduler/fifo/TestFifoScheduler.java | 2 +- 18 files changed, 445 insertions(+), 113 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index 8e65e6a42e340..a3b235bc3c97b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -102,11 +102,13 @@ public class AppSchedulingInfo { private final Map applicationSchedulingEnvs = new HashMap<>(); private final RMContext rmContext; private final int retryAttempts; + private boolean unmanagedAM; public AppSchedulingInfo(ApplicationAttemptId appAttemptId, String user, Queue queue, AbstractUsersManager abstractUsersManager, long epoch, ResourceUsage appResourceUsage, - Map applicationSchedulingEnvs, RMContext rmContext) { + Map applicationSchedulingEnvs, RMContext rmContext, + boolean unmanagedAM) { this.applicationAttemptId = appAttemptId; this.applicationId = appAttemptId.getApplicationId(); this.queue = queue; @@ -120,6 +122,7 @@ public AppSchedulingInfo(ApplicationAttemptId appAttemptId, String user, this.retryAttempts = rmContext.getYarnConfiguration().getInt( YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS); + this.unmanagedAM = unmanagedAM; ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); updateContext = new ContainerUpdateContext(this); @@ -156,6 +159,14 @@ public boolean isPending() { return pending; } + public void setUnmanagedAM(boolean unmanagedAM) { + this.unmanagedAM = unmanagedAM; + } + + public boolean isUnmanagedAM() { + return unmanagedAM; + } + public Set getRequestedPartitions() { return requestedPartitions; } @@ -617,8 +628,10 @@ public void move(Queue newQueue) { ap.getPrimaryRequestedNodePartition(), delta); } } - oldMetrics.moveAppFrom(this); - newMetrics.moveAppTo(this); + + oldMetrics.moveAppFrom(this, isUnmanagedAM()); + newMetrics.moveAppTo(this, isUnmanagedAM()); + abstractUsersManager.deactivateApplication(user, applicationId); abstractUsersManager = newQueue.getAbstractUsersManager(); if (!schedulerKeys.isEmpty()) { @@ 
-649,7 +662,8 @@ public void stop() { ask.getCount())); } } - metrics.finishAppAttempt(applicationId, pending, user); + + metrics.finishAppAttempt(applicationId, pending, user, unmanagedAM); // Clear requests themselves clearRequests(); @@ -695,7 +709,7 @@ public void recoverContainer(RMContainer rmContainer, String partition) { // If there was any container to recover, the application was // running from scheduler's POV. pending = false; - metrics.runAppAttempt(applicationId, user); + metrics.runAppAttempt(applicationId, user, isUnmanagedAM()); } // Container is completed. Skip recovering resources. @@ -736,7 +750,7 @@ private void updateMetricsForAllocatedContainer(NodeType type, // once an allocation is done we assume the application is // running from scheduler's POV. pending = false; - metrics.runAppAttempt(applicationId, user); + metrics.runAppAttempt(applicationId, user, isUnmanagedAM()); } updateMetrics(applicationId, type, node, containerAllocated, user, queue); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java index 1e42e7a01d958..6315e704c31a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java @@ -62,6 +62,20 @@ public class QueueMetrics implements MetricsSource { @Metric("# of apps completed") MutableCounterInt appsCompleted; @Metric("# of apps killed") MutableCounterInt appsKilled; @Metric("# of apps failed") MutableCounterInt appsFailed; + + @Metric("# of Unmanaged apps submitted") + private MutableCounterInt unmanagedAppsSubmitted; + @Metric("# of Unmanaged running apps") + private MutableGaugeInt unmanagedAppsRunning; + @Metric("# of Unmanaged pending apps") + private MutableGaugeInt unmanagedAppsPending; + @Metric("# of Unmanaged apps completed") + private MutableCounterInt unmanagedAppsCompleted; + @Metric("# of Unmanaged apps killed") + private MutableCounterInt unmanagedAppsKilled; + @Metric("# of Unmanaged apps failed") + private MutableCounterInt unmanagedAppsFailed; + @Metric("Aggregate # of allocated node-local containers") MutableCounterLong aggregateNodeLocalContainersAllocated; @Metric("Aggregate # of allocated rack-local containers") @@ -401,103 +415,158 @@ public void getMetrics(MetricsCollector collector, boolean all) { registry.snapshot(collector.addRecord(registry.info()), all); } - public void submitApp(String user) { + public void submitApp(String user, boolean unmanagedAM) { appsSubmitted.incr(); + if(unmanagedAM) { + unmanagedAppsSubmitted.incr(); + } QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { - userMetrics.submitApp(user); + userMetrics.submitApp(user, unmanagedAM); } if (parent != null) { - parent.submitApp(user); + parent.submitApp(user, unmanagedAM); } } - public void submitAppAttempt(String user) { + + public void submitAppAttempt(String user, boolean unmanagedAM) { appsPending.incr(); + if(unmanagedAM) { + unmanagedAppsPending.incr(); + } QueueMetrics userMetrics = getUserMetrics(user); if 
(userMetrics != null) { - userMetrics.submitAppAttempt(user); + userMetrics.submitAppAttempt(user, unmanagedAM); } if (parent != null) { - parent.submitAppAttempt(user); + parent.submitAppAttempt(user, unmanagedAM); } } - public void runAppAttempt(ApplicationId appId, String user) { + public void runAppAttempt(ApplicationId appId, String user, + boolean unmanagedAM) { runBuckets.add(appId, System.currentTimeMillis()); appsRunning.incr(); appsPending.decr(); + + if(unmanagedAM) { + unmanagedAppsRunning.incr(); + unmanagedAppsPending.decr(); + } + QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { - userMetrics.runAppAttempt(appId, user); + userMetrics.runAppAttempt(appId, user, unmanagedAM); } if (parent != null) { - parent.runAppAttempt(appId, user); + parent.runAppAttempt(appId, user, unmanagedAM); } } - public void finishAppAttempt( - ApplicationId appId, boolean isPending, String user) { + public void finishAppAttempt(ApplicationId appId, boolean isPending, + String user, boolean unmanagedAM) { runBuckets.remove(appId); if (isPending) { appsPending.decr(); } else { appsRunning.decr(); } + + if(unmanagedAM) { + if (isPending) { + unmanagedAppsPending.decr(); + } else { + unmanagedAppsRunning.decr(); + } + } QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { - userMetrics.finishAppAttempt(appId, isPending, user); + userMetrics.finishAppAttempt(appId, isPending, user, unmanagedAM); } if (parent != null) { - parent.finishAppAttempt(appId, isPending, user); + parent.finishAppAttempt(appId, isPending, user, unmanagedAM); } } - public void finishApp(String user, RMAppState rmAppFinalState) { + public void finishApp(String user, RMAppState rmAppFinalState, + boolean unmanagedAM) { switch (rmAppFinalState) { case KILLED: appsKilled.incr(); break; case FAILED: appsFailed.incr(); break; default: appsCompleted.incr(); break; } + + if(unmanagedAM) { + switch (rmAppFinalState) { + case KILLED: + unmanagedAppsKilled.incr(); + break; + case FAILED: + unmanagedAppsFailed.incr(); + break; + default: + unmanagedAppsCompleted.incr(); + break; + } + } + QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { - userMetrics.finishApp(user, rmAppFinalState); + userMetrics.finishApp(user, rmAppFinalState, unmanagedAM); } if (parent != null) { - parent.finishApp(user, rmAppFinalState); + parent.finishApp(user, rmAppFinalState, unmanagedAM); } } - - public void moveAppFrom(AppSchedulingInfo app) { + + + public void moveAppFrom(AppSchedulingInfo app, boolean unmanagedAM) { if (app.isPending()) { appsPending.decr(); } else { appsRunning.decr(); } + if(unmanagedAM) { + if (app.isPending()) { + unmanagedAppsPending.decr(); + } else { + unmanagedAppsRunning.decr(); + } + } + QueueMetrics userMetrics = getUserMetrics(app.getUser()); if (userMetrics != null) { - userMetrics.moveAppFrom(app); + userMetrics.moveAppFrom(app, unmanagedAM); } if (parent != null) { - parent.moveAppFrom(app); + parent.moveAppFrom(app, unmanagedAM); } } - - public void moveAppTo(AppSchedulingInfo app) { + + public void moveAppTo(AppSchedulingInfo app, boolean unmanagedAM) { if (app.isPending()) { appsPending.incr(); } else { appsRunning.incr(); } + if(unmanagedAM) { + if (app.isPending()) { + unmanagedAppsPending.incr(); + } else { + unmanagedAppsRunning.incr(); + } + } QueueMetrics userMetrics = getUserMetrics(app.getUser()); if (userMetrics != null) { - userMetrics.moveAppTo(app); + userMetrics.moveAppTo(app, unmanagedAM); } if (parent != null) { - parent.moveAppTo(app); + 
parent.moveAppTo(app, unmanagedAM); } } + /** * Set available resources. To be called by scheduler periodically as * resources become available. @@ -1024,18 +1093,34 @@ public int getAppsSubmitted() { return appsSubmitted.value(); } + public int getUnmanagedAppsSubmitted() { + return unmanagedAppsSubmitted.value(); + } + public int getAppsRunning() { return appsRunning.value(); } + public int getUnmanagedAppsRunning() { + return unmanagedAppsRunning.value(); + } + public int getAppsPending() { return appsPending.value(); } + public int getUnmanagedAppsPending() { + return unmanagedAppsPending.value(); + } + public int getAppsCompleted() { return appsCompleted.value(); } + public int getUnmanagedAppsCompleted() { + return unmanagedAppsCompleted.value(); + } + public int getAppsKilled() { return appsKilled.value(); } @@ -1044,6 +1129,10 @@ public int getAppsFailed() { return appsFailed.value(); } + public int getUnmanagedAppsFailed() { + return unmanagedAppsFailed.value(); + } + public Resource getAllocatedResources() { if (queueMetricsForCustomResources != null) { return Resource.newInstance(allocatedMB.value(), allocatedVCores.value(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java index 96288f8b9f312..fce7b551c6142 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java @@ -30,16 +30,20 @@ public class SchedulerApplication { private final String user; private volatile T currentAttempt; private volatile Priority priority; + private boolean unmanagedAM; - public SchedulerApplication(Queue queue, String user) { + public SchedulerApplication(Queue queue, String user, boolean unmanagedAM) { this.queue = queue; this.user = user; + this.unmanagedAM = unmanagedAM; this.priority = null; } - public SchedulerApplication(Queue queue, String user, Priority priority) { + public SchedulerApplication(Queue queue, String user, Priority priority, + boolean unmanagedAM) { this.queue = queue; this.user = user; + this.unmanagedAM = unmanagedAM; this.priority = priority; } @@ -64,7 +68,7 @@ public void setCurrentAppAttempt(T currentAttempt) { } public void stop(RMAppState rmAppFinalState) { - queue.getMetrics().finishApp(user, rmAppFinalState); + queue.getMetrics().finishApp(user, rmAppFinalState, isUnmanagedAM()); } public Priority getPriority() { @@ -80,4 +84,7 @@ public void setPriority(Priority priority) { } } + public boolean isUnmanagedAM() { + return unmanagedAM; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 649fabd796a9e..6f922bbd30625 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -241,7 +241,7 @@ public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, this.appSchedulingInfo = new AppSchedulingInfo(applicationAttemptId, user, queue, abstractUsersManager, rmContext.getEpoch(), attemptResourceUsage, - applicationSchedulingEnvs, rmContext); + applicationSchedulingEnvs, rmContext, unmanagedAM); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); readLock = lock.readLock(); writeLock = lock.writeLock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index d259c93a1f454..70738c0a5a794 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -865,7 +865,8 @@ public boolean isAmbiguous(String queueName) { private void addApplicationOnRecovery(ApplicationId applicationId, String queueName, String user, - Priority priority, ApplicationPlacementContext placementContext) { + Priority priority, ApplicationPlacementContext placementContext, + boolean unmanagedAM) { writeLock.lock(); try { //check if the queue needs to be auto-created during recovery @@ -927,9 +928,11 @@ private void addApplicationOnRecovery(ApplicationId applicationId, // Ignore the exception for recovered app as the app was previously // accepted. 
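The hunks above add per-queue getters for the new unmanaged-AM counters and bump them alongside the existing apps* metrics, but the QueueMetrics field declarations themselves fall outside this excerpt. A minimal sketch of how they would plausibly be declared and driven — assuming the same metrics2 types used for the existing metrics (MutableCounterInt for submitted/completed/failed/killed, MutableGaugeInt for pending/running), with the @Metric descriptions invented here and submitApp(user, unmanagedAM) mirroring the submitAppAttempt pattern shown earlier:

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.lib.MutableCounterInt;
    import org.apache.hadoop.metrics2.lib.MutableGaugeInt;

    // Field names determine the published metric names, e.g. "UnmanagedAppsSubmitted".
    @Metric("# of apps submitted with an unmanaged AM") MutableCounterInt unmanagedAppsSubmitted;
    @Metric("# of unmanaged-AM apps pending") MutableGaugeInt unmanagedAppsPending;
    @Metric("# of unmanaged-AM apps running") MutableGaugeInt unmanagedAppsRunning;
    @Metric("# of unmanaged-AM apps completed") MutableCounterInt unmanagedAppsCompleted;
    @Metric("# of unmanaged-AM apps failed") MutableCounterInt unmanagedAppsFailed;
    @Metric("# of unmanaged-AM apps killed") MutableCounterInt unmanagedAppsKilled;

    public void submitApp(String user, boolean unmanagedAM) {
      appsSubmitted.incr();
      if (unmanagedAM) {
        unmanagedAppsSubmitted.incr();
      }
      QueueMetrics userMetrics = getUserMetrics(user);
      if (userMetrics != null) {
        userMetrics.submitApp(user, unmanagedAM);
      }
      if (parent != null) {
        parent.submitApp(user, unmanagedAM);
      }
    }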
} - queue.getMetrics().submitApp(user); + queue.getMetrics().submitApp(user, unmanagedAM); + SchedulerApplication application = - new SchedulerApplication(queue, user, priority); + new SchedulerApplication(queue, user, priority, + unmanagedAM); applications.put(applicationId, application); LOG.info("Accepted application " + applicationId + " from user: " + user + ", in queue: " + queueName); @@ -1012,7 +1015,7 @@ private CSQueue getOrCreateQueueFromPlacementContext(ApplicationId private void addApplication(ApplicationId applicationId, String queueName, String user, Priority priority, - ApplicationPlacementContext placementContext) { + ApplicationPlacementContext placementContext, boolean unmanagedAM) { writeLock.lock(); try { if (isSystemAppsLimitReached()) { @@ -1116,9 +1119,10 @@ private void addApplication(ApplicationId applicationId, String queueName, return; } // update the metrics - queue.getMetrics().submitApp(user); + queue.getMetrics().submitApp(user, unmanagedAM); SchedulerApplication application = - new SchedulerApplication(queue, user, priority); + new SchedulerApplication(queue, user, priority, + unmanagedAM); applications.put(applicationId, application); LOG.info("Accepted application " + applicationId + " from user: " + user + ", in queue: " + queueName); @@ -1986,11 +1990,13 @@ public void handle(SchedulerEvent event) { if (!appAddedEvent.getIsAppRecovering()) { addApplication(appAddedEvent.getApplicationId(), queueName, appAddedEvent.getUser(), appAddedEvent.getApplicatonPriority(), - appAddedEvent.getPlacementContext()); + appAddedEvent.getPlacementContext(), + appAddedEvent.isUnmanagedAM()); } else { addApplicationOnRecovery(appAddedEvent.getApplicationId(), queueName, appAddedEvent.getUser(), appAddedEvent.getApplicatonPriority(), - appAddedEvent.getPlacementContext()); + appAddedEvent.getPlacementContext(), + appAddedEvent.isUnmanagedAM()); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index dce3115b82f1a..89e7f83800376 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -603,7 +603,9 @@ public void submitApplicationAttempt(FiCaSchedulerApp application, // We don't want to update metrics for move app if (!isMoveApp) { - metrics.submitAppAttempt(userName); + boolean unmanagedAM = application.getAppSchedulingInfo() != null && + application.getAppSchedulingInfo().isUnmanagedAM(); + metrics.submitAppAttempt(userName, unmanagedAM); } getParent().submitApplicationAttempt(application, userName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java index 80b7f2fb821eb..ea846d058b5ba 100644 
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java @@ -34,6 +34,7 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { private final boolean isAppRecovering; private final Priority appPriority; private final ApplicationPlacementContext placementContext; + private boolean unmanagedAM = false; public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user) { @@ -58,6 +59,7 @@ public AppAddedSchedulerEvent(String user, this(submissionContext.getApplicationId(), submissionContext.getQueue(), user, isAppRecovering, submissionContext.getReservationID(), appPriority, null); + this.unmanagedAM = submissionContext.getUnmanagedAM(); } public AppAddedSchedulerEvent(String user, @@ -66,6 +68,7 @@ public AppAddedSchedulerEvent(String user, this(submissionContext.getApplicationId(), submissionContext.getQueue(), user, isAppRecovering, submissionContext.getReservationID(), appPriority, placementContext); + this.unmanagedAM = submissionContext.getUnmanagedAM(); } public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, @@ -108,4 +111,8 @@ public Priority getApplicatonPriority() { public ApplicationPlacementContext getPlacementContext() { return placementContext; } + + public boolean isUnmanagedAM() { + return unmanagedAM; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index bc21a17cc732e..3790c48ba08d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -552,11 +552,15 @@ protected void addApplication(ApplicationId applicationId, return; } } + boolean unmanagedAM = rmApp != null && + rmApp.getApplicationSubmissionContext() != null + && rmApp.getApplicationSubmissionContext().getUnmanagedAM(); SchedulerApplication application = - new SchedulerApplication<>(queue, user); + new SchedulerApplication<>(queue, user, unmanagedAM); applications.put(applicationId, application); - queue.getMetrics().submitApp(user); + + queue.getMetrics().submitApp(user, unmanagedAM); LOG.info("Accepted application " + applicationId + " from user: " + user + ", in queue: " + queueName @@ -610,7 +614,7 @@ protected void addApplicationAttempt( maxRunningEnforcer.trackNonRunnableApp(attempt); } - queue.getMetrics().submitAppAttempt(user); + queue.getMetrics().submitAppAttempt(user, application.isUnmanagedAM()); LOG.info("Added Application Attempt " + applicationAttemptId + " to scheduler from user: " + user); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 9ec4822e513fe..c1a591eba0dd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -389,11 +389,13 @@ public Allocation allocate(ApplicationAttemptId applicationAttemptId, @VisibleForTesting public synchronized void addApplication(ApplicationId applicationId, - String queue, String user, boolean isAppRecovering) { + String queue, String user, boolean isAppRecovering, + boolean unmanagedAM) { SchedulerApplication application = - new SchedulerApplication<>(DEFAULT_QUEUE, user); + new SchedulerApplication<>(DEFAULT_QUEUE, user, unmanagedAM); applications.put(applicationId, application); - metrics.submitApp(user); + + metrics.submitApp(user, unmanagedAM); LOG.info("Accepted application " + applicationId + " from user: " + user + ", currently num of applications: " + applications.size()); if (isAppRecovering) { @@ -424,7 +426,8 @@ public synchronized void addApplication(ApplicationId applicationId, } application.setCurrentAppAttempt(schedulerApp); - metrics.submitAppAttempt(user); + metrics.submitAppAttempt(user, application.isUnmanagedAM()); + LOG.info("Added Application Attempt " + appAttemptId + " to scheduler from user " + application.getUser()); if (isAttemptRecovering) { @@ -768,8 +771,8 @@ public void handle(SchedulerEvent event) { { AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event; addApplication(appAddedEvent.getApplicationId(), - appAddedEvent.getQueue(), appAddedEvent.getUser(), - appAddedEvent.getIsAppRecovering()); + appAddedEvent.getQueue(), appAddedEvent.getUser(), + appAddedEvent.getIsAppRecovering(), appAddedEvent.isUnmanagedAM()); } break; case APP_REMOVED: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index da7f26bea3f67..c5f45fd9789a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -1559,6 +1559,8 @@ public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception { new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService()); nm1.registerNode(); + QueueMetrics qm1 = rm1.getResourceScheduler().getRootQueueMetrics(); + assertUnmanagedAMQueueMetrics(qm1, 0, 0, 0, 0); // create app and launch the UAM MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(200, rm1) @@ -1567,6 +1569,7 @@ public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception { RMApp app0 = MockRMAppSubmitter.submit(rm1, data); MockAM am0 = MockRM.launchUAM(app0, rm1, nm1); 
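The unmanagedAM flag threaded through the Capacity, Fair and Fifo schedulers above originates from ApplicationSubmissionContext.getUnmanagedAM() on the submitted application, as the AppAddedSchedulerEvent change shows. For reference, a rough client-side sketch of submitting such an application — the kind these new Unmanaged* metrics count — where the application name and queue are placeholders and the AM container spec and resource setup are omitted:

    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.client.api.YarnClientApplication;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();

    YarnClientApplication app = yarnClient.createApplication();
    ApplicationSubmissionContext context = app.getApplicationSubmissionContext();
    context.setApplicationName("uam-example");   // placeholder name
    context.setQueue("default");                 // placeholder queue
    // The RM will not launch an AM container for this application; the AM
    // process runs outside the cluster and registers with the RM itself.
    context.setUnmanagedAM(true);
    // ContainerLaunchContext and Resource setup omitted for brevity.
    yarnClient.submitApplication(context);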
am0.registerAppAttempt(); + assertUnmanagedAMQueueMetrics(qm1, 1, 1, 0, 0); // Allocate containers to UAM int numContainers = 2; @@ -1581,17 +1584,19 @@ public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception { new ArrayList()).getAllocatedContainers()); Thread.sleep(100); } + assertUnmanagedAMQueueMetrics(qm1, 1, 0, 1, 0); // start new RM rm2 = new MockRM(conf, memStore); rm2.start(); MockMemoryRMStateStore memStore2 = (MockMemoryRMStateStore) rm2.getRMStateStore(); + QueueMetrics qm2 = rm2.getResourceScheduler().getRootQueueMetrics(); rm2.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED); rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED); - // recover app nm1.setResourceTrackerService(rm2.getResourceTrackerService()); + assertUnmanagedAMQueueMetrics(qm2, 1, 1, 0, 0); RMApp recoveredApp = rm2.getRMContext().getRMApps().get(app0.getApplicationId()); NMContainerStatus container1 = TestRMRestart @@ -1601,13 +1606,13 @@ public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception { .createNMContainerStatus(am0.getApplicationAttemptId(), 2, ContainerState.RUNNING); nm1.registerNode(Arrays.asList(container1, container2), null); - // Wait for RM to settle down on recovering containers; waitForNumContainersToRecover(2, rm2, am0.getApplicationAttemptId()); // retry registerApplicationMaster() after RM restart. am0.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext()); am0.registerAppAttempt(true); + assertUnmanagedAMQueueMetrics(qm2, 1, 0, 1, 0); // Check if UAM is correctly recovered on restart rm2.waitForState(app0.getApplicationId(), RMAppState.RUNNING); @@ -1626,6 +1631,7 @@ public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception { // Check if UAM is able to heart beat Assert.assertNotNull(am0.doHeartbeat()); + assertUnmanagedAMQueueMetrics(qm2, 1, 0, 1, 0); // Complete the UAM am0.unregisterAppAttempt(false); @@ -1633,15 +1639,26 @@ public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception { rm2.waitForState(app0.getApplicationId(), RMAppState.FINISHED); Assert.assertEquals(FinalApplicationStatus.SUCCEEDED, recoveredApp.getFinalApplicationStatus()); + assertUnmanagedAMQueueMetrics(qm2, 1, 0, 0, 1); // Restart RM once more to check UAM is not re-run MockRM rm3 = new MockRM(conf, memStore2); rm3.start(); recoveredApp = rm3.getRMContext().getRMApps().get(app0.getApplicationId()); + QueueMetrics qm3 = rm3.getResourceScheduler().getRootQueueMetrics(); Assert.assertEquals(RMAppState.FINISHED, recoveredApp.getState()); + assertUnmanagedAMQueueMetrics(qm2, 1, 0, 0, 1); + } + private void assertUnmanagedAMQueueMetrics(QueueMetrics qm, int appsSubmitted, + int appsPending, int appsRunning, int appsCompleted) { + Assert.assertEquals(appsSubmitted, qm.getUnmanagedAppsSubmitted()); + Assert.assertEquals(appsPending, qm.getUnmanagedAppsPending()); + Assert.assertEquals(appsRunning, qm.getUnmanagedAppsRunning()); + Assert.assertEquals(appsCompleted, qm.getUnmanagedAppsCompleted()); } + @Test(timeout = 30000) public void testUnknownUserOnRecovery() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java index b9506be4b3e18..19ee08f17cf2c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java @@ -33,6 +33,12 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.APPS_PENDING; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.APPS_RUNNING; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.APPS_SUBMITTED; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_COMPLETED; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_FAILED; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_KILLED; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_PENDING; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_RUNNING; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_SUBMITTED; final class AppMetricsChecker { private final static Logger LOG = @@ -45,7 +51,13 @@ final class AppMetricsChecker { .gaugeInt(APPS_RUNNING, 0) .counter(APPS_COMPLETED, 0) .counter(APPS_FAILED, 0) - .counter(APPS_KILLED, 0); + .counter(APPS_KILLED, 0) + .counter(UNMANAGED_APPS_SUBMITTED, 0) + .gaugeInt(UNMANAGED_APPS_PENDING, 0) + .gaugeInt(UNMANAGED_APPS_RUNNING, 0) + .counter(UNMANAGED_APPS_COMPLETED, 0) + .counter(UNMANAGED_APPS_FAILED, 0) + .counter(UNMANAGED_APPS_KILLED, 0); enum AppMetricsKey { APPS_SUBMITTED("AppsSubmitted"), @@ -53,7 +65,13 @@ enum AppMetricsKey { APPS_RUNNING("AppsRunning"), APPS_COMPLETED("AppsCompleted"), APPS_FAILED("AppsFailed"), - APPS_KILLED("AppsKilled"); + APPS_KILLED("AppsKilled"), + UNMANAGED_APPS_SUBMITTED("UnmanagedAppsSubmitted"), + UNMANAGED_APPS_PENDING("UnmanagedAppsPending"), + UNMANAGED_APPS_RUNNING("UnmanagedAppsRunning"), + UNMANAGED_APPS_COMPLETED("UnmanagedAppsCompleted"), + UNMANAGED_APPS_FAILED("UnmanagedAppsFailed"), + UNMANAGED_APPS_KILLED("UnmanagedAppsKilled"); private String value; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index b1a69f019bfa7..47174a47b05be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -405,7 +405,8 @@ public void testReleasedContainerIfAppAttemptisNull() throws Exception { RMApp mockAPp = new MockRMApp(125, System.currentTimeMillis(), RMAppState.NEW); 
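AppMetricsChecker above maps each new enum key to its published metric name ("UnmanagedAppsSubmitted", "UnmanagedAppsPending", and so on), registering submitted/completed/failed/killed as counters and pending/running as integer gauges. Outside that helper, the same records could be checked directly against a queue's MetricsSource with Hadoop's MetricsAsserts test utilities; a sketch with arbitrarily chosen expected values:

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.apache.hadoop.metrics2.MetricsSource;

    private void verifyUnmanagedAppMetrics(MetricsSource queueSource) {
      MetricsRecordBuilder rb = getMetrics(queueSource);
      // Counters only ever increase over the queue's lifetime.
      assertCounter("UnmanagedAppsSubmitted", 1, rb);
      assertCounter("UnmanagedAppsCompleted", 0, rb);
      assertCounter("UnmanagedAppsFailed", 0, rb);
      // Gauges reflect current state and move in both directions.
      assertGauge("UnmanagedAppsPending", 0, rb);
      assertGauge("UnmanagedAppsRunning", 1, rb);
    }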
SchedulerApplication application = - new SchedulerApplication(null, mockAPp.getUser()); + new SchedulerApplication(null, mockAPp.getUser(), + false); // Second app with one app attempt RMApp app = MockRMAppSubmitter.submitWithMemory(200, rm1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java index 62e5f3d72ab2c..391649d409227 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java @@ -50,7 +50,7 @@ public void testBacklistChanged() { doReturn(new YarnConfiguration()).when(rmContext).getYarnConfiguration(); AppSchedulingInfo appSchedulingInfo = new AppSchedulingInfo(appAttemptId, "test", queue, null, 0, new ResourceUsage(), - new HashMap(), rmContext); + new HashMap(), rmContext, false); appSchedulingInfo.updatePlacesBlacklistedByApp(new ArrayList(), new ArrayList()); @@ -124,7 +124,7 @@ public void testSchedulerKeyAccounting() { doReturn(new YarnConfiguration()).when(rmContext).getYarnConfiguration(); AppSchedulingInfo info = new AppSchedulingInfo( appAttemptId, "test", queue, mock(ActiveUsersManager.class), 0, - new ResourceUsage(), new HashMap<>(), rmContext); + new ResourceUsage(), new HashMap<>(), rmContext, false); Assert.assertEquals(0, info.getSchedulerKeys().size()); Priority pri1 = Priority.newInstance(1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java index eb240d1b6d338..ab4296cc4faed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java @@ -86,8 +86,8 @@ public void testSinglePartitionWithSingleLevelQueueMetrics() QueueMetrics q2 = QueueMetrics.forQueue(ms, "root.q2", parentQueue, true, CONF); - q1.submitApp(user); - q1.submitAppAttempt(user); + q1.submitApp(user, false); + q1.submitAppAttempt(user, false); root.setAvailableResourcesToQueue("x", Resources.createResource(200 * GB, 200)); @@ -140,8 +140,8 @@ public void testTwoPartitionWithSingleLevelQueueMetrics() throws Exception { QueueMetrics.forQueue(ms, "root.q2", parentQueue, false, CONF); AppSchedulingInfo app = mockApp(user); - q1.submitApp(user); - q1.submitAppAttempt(user); + q1.submitApp(user, false); + q1.submitAppAttempt(user, false); root.setAvailableResourcesToQueue("x", Resources.createResource(200 * GB, 200)); @@ -414,8 +414,8 @@ public void testTwoLevelWithUserMetrics() { 
QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, CONF); AppSchedulingInfo app = mockApp(user); - metrics.submitApp(user); - metrics.submitAppAttempt(user); + metrics.submitApp(user, false); + metrics.submitAppAttempt(user, false); parentMetrics.setAvailableResourcesToQueue(partition, Resources.createResource(100 * GB, 100)); @@ -447,7 +447,7 @@ public void testTwoLevelWithUserMetrics() { checkResources(partitionSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, 6, 0, 0, 0); - metrics.runAppAttempt(app.getApplicationId(), user); + metrics.runAppAttempt(app.getApplicationId(), user, false); metrics.allocateResources(partition, user, 3, Resources.createResource(1 * GB, 1), true); @@ -491,9 +491,9 @@ public void testTwoLevelWithUserMetrics() { 0, 0, 0); metrics.finishAppAttempt(app.getApplicationId(), app.isPending(), - app.getUser()); + app.getUser(), false); - metrics.finishApp(user, RMAppState.FINISHED); + metrics.finishApp(user, RMAppState.FINISHED, false); } @Test @@ -519,8 +519,8 @@ public void testThreeLevelWithUserMetrics() { QueueMetrics.forQueue(leafQueueName1, leafQueue, true, CONF); AppSchedulingInfo app = mockApp(user); - metrics1.submitApp(user); - metrics1.submitAppAttempt(user); + metrics1.submitApp(user, false); + metrics1.submitAppAttempt(user, false); parentMetrics.setAvailableResourcesToQueue(partitionX, Resources.createResource(200 * GB, 200)); @@ -615,9 +615,9 @@ public void testThreeLevelWithUserMetrics() { 0, 0); metrics1.finishAppAttempt(app.getApplicationId(), app.isPending(), - app.getUser()); + app.getUser(), false); - metrics1.finishApp(user, RMAppState.FINISHED); + metrics1.finishApp(user, RMAppState.FINISHED, false); } /** @@ -650,8 +650,8 @@ public void testSinglePartitionWithSingleLevelQueueMetricsWithoutUserMetrics() AppSchedulingInfo app = mockApp(user); - q1.submitApp(user); - q1.submitAppAttempt(user); + q1.submitApp(user, false); + q1.submitAppAttempt(user, false); root.setAvailableResourcesToQueue("x", Resources.createResource(200 * GB, 200)); @@ -680,8 +680,9 @@ public void testSinglePartitionWithSingleLevelQueueMetricsWithoutUserMetrics() checkResources(q2Source, 0, 0, 0, 0, 0, 3 * GB, 3, 3); checkResources(q2UserSource, 0, 0, 0, 0, 0, 3 * GB, 3, 3); - q1.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser()); - q1.finishApp(user, RMAppState.FINISHED); + q1.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser(), + false); + q1.finishApp(user, RMAppState.FINISHED, false); } public static MetricsSource partitionSource(MetricsSystem ms, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java index 33c39290de572..2137285bac036 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java @@ -44,6 +44,10 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.APPS_PENDING; import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.APPS_RUNNING; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.APPS_SUBMITTED; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_FAILED; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_PENDING; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_RUNNING; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppMetricsChecker.AppMetricsKey.UNMANAGED_APPS_SUBMITTED; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_CONTAINERS_ALLOCATED; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.AGGREGATE_CONTAINERS_RELEASED; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceMetricsChecker.ResourceMetricsKey.ALLOCATED_CONTAINERS; @@ -89,12 +93,12 @@ public void testDefaultSingleQueueMetrics() { MetricsSource queueSource= queueSource(ms, queueName); AppSchedulingInfo app = mockApp(USER); - metrics.submitApp(USER); + metrics.submitApp(USER, false); MetricsSource userSource = userSource(ms, queueName, USER); AppMetricsChecker appMetricsChecker = AppMetricsChecker.create() .counter(APPS_SUBMITTED, 1) .checkAgainst(queueSource, true); - metrics.submitAppAttempt(USER); + metrics.submitAppAttempt(USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 1) .checkAgainst(queueSource, true); @@ -111,7 +115,7 @@ public void testDefaultSingleQueueMetrics() { .gaugeLong(PENDING_MB, 15 * GB).gaugeInt(PENDING_V_CORES, 15) .gaugeInt(PENDING_CONTAINERS, 5).checkAgainst(queueSource); - metrics.runAppAttempt(app.getApplicationId(), USER); + metrics.runAppAttempt(app.getApplicationId(), USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 0) .gaugeInt(APPS_RUNNING, 1) @@ -151,12 +155,12 @@ public void testDefaultSingleQueueMetrics() { .checkAgainst(queueSource); metrics.finishAppAttempt( - app.getApplicationId(), app.isPending(), app.getUser()); + app.getApplicationId(), app.isPending(), app.getUser(), false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .counter(APPS_SUBMITTED, 1) .gaugeInt(APPS_RUNNING, 0) .checkAgainst(queueSource, true); - metrics.finishApp(USER, RMAppState.FINISHED); + metrics.finishApp(USER, RMAppState.FINISHED, false); AppMetricsChecker.createFromChecker(appMetricsChecker) .counter(APPS_COMPLETED, 1) .checkAgainst(queueSource, true); @@ -172,36 +176,36 @@ public void testQueueAppMetricsForMultipleFailures() { MetricsSource queueSource = queueSource(ms, queueName); AppSchedulingInfo app = mockApp(USER); - metrics.submitApp(USER); + metrics.submitApp(USER, false); MetricsSource userSource = userSource(ms, queueName, USER); AppMetricsChecker appMetricsChecker = AppMetricsChecker.create() .counter(APPS_SUBMITTED, 1) .checkAgainst(queueSource, true); - metrics.submitAppAttempt(USER); + metrics.submitAppAttempt(USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 1) .checkAgainst(queueSource, true); - metrics.runAppAttempt(app.getApplicationId(), USER); + 
metrics.runAppAttempt(app.getApplicationId(), USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 0) .gaugeInt(APPS_RUNNING, 1) .checkAgainst(queueSource, true); metrics.finishAppAttempt( - app.getApplicationId(), app.isPending(), app.getUser()); + app.getApplicationId(), app.isPending(), app.getUser(), false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_RUNNING, 0) .checkAgainst(queueSource, true); // As the application has failed, framework retries the same application // based on configuration - metrics.submitAppAttempt(USER); + metrics.submitAppAttempt(USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 1) .checkAgainst(queueSource, true); - metrics.runAppAttempt(app.getApplicationId(), USER); + metrics.runAppAttempt(app.getApplicationId(), USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 0) .gaugeInt(APPS_RUNNING, 1) @@ -209,19 +213,19 @@ public void testQueueAppMetricsForMultipleFailures() { // Suppose say application has failed this time as well. metrics.finishAppAttempt( - app.getApplicationId(), app.isPending(), app.getUser()); + app.getApplicationId(), app.isPending(), app.getUser(), false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_RUNNING, 0) .checkAgainst(queueSource, true); // As the application has failed, framework retries the same application // based on configuration - metrics.submitAppAttempt(USER); + metrics.submitAppAttempt(USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 1) .checkAgainst(queueSource, true); - metrics.runAppAttempt(app.getApplicationId(), USER); + metrics.runAppAttempt(app.getApplicationId(), USER, false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_PENDING, 0) .gaugeInt(APPS_RUNNING, 1) @@ -229,12 +233,12 @@ public void testQueueAppMetricsForMultipleFailures() { // Suppose say application has failed, and there's no more retries. metrics.finishAppAttempt( - app.getApplicationId(), app.isPending(), app.getUser()); + app.getApplicationId(), app.isPending(), app.getUser(), false); appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_RUNNING, 0) .checkAgainst(queueSource, true); - metrics.finishApp(USER, RMAppState.FAILED); + metrics.finishApp(USER, RMAppState.FAILED, false); AppMetricsChecker.createFromChecker(appMetricsChecker) .gaugeInt(APPS_RUNNING, 0) .counter(APPS_FAILED, 1) @@ -243,6 +247,87 @@ public void testQueueAppMetricsForMultipleFailures() { assertNull(userSource); } + @Test + public void testQueueUnmanagedAppMetricsForMultipleFailures() { + String queueName = "single"; + + QueueMetrics metrics = QueueMetrics.forQueue(ms, queueName, null, false, + new Configuration()); + MetricsSource queueSource = queueSource(ms, queueName); + AppSchedulingInfo app = mockApp(USER); + + // Submit an unmanaged Application. 
+ metrics.submitApp(USER, true); + MetricsSource userSource = userSource(ms, queueName, USER); + AppMetricsChecker appMetricsChecker = AppMetricsChecker.create() + .counter(UNMANAGED_APPS_SUBMITTED, 1).counter(APPS_SUBMITTED, 1) + .checkAgainst(queueSource, true); + metrics.submitAppAttempt(USER, true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_PENDING, 1).gaugeInt(APPS_PENDING, 1) + .checkAgainst(queueSource, true); + + metrics.runAppAttempt(app.getApplicationId(), USER, true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_PENDING, 0).gaugeInt(APPS_PENDING, 0) + .gaugeInt(UNMANAGED_APPS_RUNNING, 1).gaugeInt(APPS_RUNNING, 1) + .checkAgainst(queueSource, true); + + metrics.finishAppAttempt( + app.getApplicationId(), app.isPending(), app.getUser(), true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_RUNNING, 0).gaugeInt(APPS_RUNNING, 0) + .checkAgainst(queueSource, true); + + // As the application has failed, framework retries the same application + // based on configuration + metrics.submitAppAttempt(USER, true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_PENDING, 1).gaugeInt(APPS_PENDING, 1) + .checkAgainst(queueSource, true); + + metrics.runAppAttempt(app.getApplicationId(), USER, true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_PENDING, 0).gaugeInt(APPS_PENDING, 0) + .gaugeInt(UNMANAGED_APPS_RUNNING, 1).gaugeInt(APPS_RUNNING, 1) + .checkAgainst(queueSource, true); + + // Suppose say application has failed this time as well. + metrics.finishAppAttempt( + app.getApplicationId(), app.isPending(), app.getUser(), true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_RUNNING, 0).gaugeInt(APPS_RUNNING, 0) + .checkAgainst(queueSource, true); + + // As the application has failed, framework retries the same application + // based on configuration + metrics.submitAppAttempt(USER, true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_PENDING, 1).gaugeInt(APPS_PENDING, 1) + .checkAgainst(queueSource, true); + + metrics.runAppAttempt(app.getApplicationId(), USER, true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_PENDING, 0).gaugeInt(APPS_PENDING, 0) + .gaugeInt(UNMANAGED_APPS_RUNNING, 1).gaugeInt(APPS_RUNNING, 1) + .checkAgainst(queueSource, true); + + // Suppose say application has failed, and there's no more retries. 
+ metrics.finishAppAttempt( + app.getApplicationId(), app.isPending(), app.getUser(), true); + appMetricsChecker = AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_RUNNING, 0).gaugeInt(APPS_RUNNING, 0) + .checkAgainst(queueSource, true); + + metrics.finishApp(USER, RMAppState.FAILED, true); + AppMetricsChecker.createFromChecker(appMetricsChecker) + .gaugeInt(UNMANAGED_APPS_RUNNING, 0).gaugeInt(APPS_RUNNING, 0) + .counter(UNMANAGED_APPS_FAILED, 1).counter(APPS_FAILED, 1) + .checkAgainst(queueSource, true); + + assertNull(userSource); + } + @Test public void testSingleQueueWithUserMetrics() { String queueName = "single2"; @@ -252,7 +337,7 @@ public void testSingleQueueWithUserMetrics() { MetricsSource queueSource = queueSource(ms, queueName); AppSchedulingInfo app = mockApp(USER_2); - metrics.submitApp(USER_2); + metrics.submitApp(USER_2, false); MetricsSource userSource = userSource(ms, queueName, USER_2); AppMetricsChecker appMetricsQueueSourceChecker = AppMetricsChecker.create() @@ -262,7 +347,7 @@ public void testSingleQueueWithUserMetrics() { .counter(APPS_SUBMITTED, 1) .checkAgainst(userSource, true); - metrics.submitAppAttempt(USER_2); + metrics.submitAppAttempt(USER_2, false); appMetricsQueueSourceChecker = AppMetricsChecker .createFromChecker(appMetricsQueueSourceChecker) .gaugeInt(APPS_PENDING, 1) @@ -298,7 +383,7 @@ public void testSingleQueueWithUserMetrics() { .gaugeInt(PENDING_CONTAINERS, 5) .checkAgainst(userSource); - metrics.runAppAttempt(app.getApplicationId(), USER_2); + metrics.runAppAttempt(app.getApplicationId(), USER_2, false); appMetricsQueueSourceChecker = AppMetricsChecker .createFromChecker(appMetricsQueueSourceChecker) .gaugeInt(APPS_PENDING, 0) @@ -349,7 +434,7 @@ public void testSingleQueueWithUserMetrics() { .checkAgainst(userSource); metrics.finishAppAttempt( - app.getApplicationId(), app.isPending(), app.getUser()); + app.getApplicationId(), app.isPending(), app.getUser(), false); appMetricsQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker) .gaugeInt(APPS_RUNNING, 0) @@ -358,7 +443,7 @@ public void testSingleQueueWithUserMetrics() { AppMetricsChecker.createFromChecker(appMetricsUserSourceChecker) .gaugeInt(APPS_RUNNING, 0) .checkAgainst(userSource, true); - metrics.finishApp(USER_2, RMAppState.FINISHED); + metrics.finishApp(USER_2, RMAppState.FINISHED, false); AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker) .counter(APPS_COMPLETED, 1) .checkAgainst(queueSource, true); @@ -382,7 +467,7 @@ public void testNodeTypeMetrics() { MetricsSource queueSource = queueSource(ms, leafQueueName); //AppSchedulingInfo app = mockApp(user); - metrics.submitApp(USER); + metrics.submitApp(USER, false); MetricsSource userSource = userSource(ms, leafQueueName, USER); MetricsSource parentUserSource = userSource(ms, parentQueueName, USER); @@ -417,7 +502,7 @@ public void testTwoLevelWithUserMetrics() { QueueInfo root = new QueueInfo(null, "root", ms, conf, USER); QueueInfo leaf = new QueueInfo(root, "root.leaf", ms, conf, USER); - leaf.queueMetrics.submitApp(USER); + leaf.queueMetrics.submitApp(USER, false); AppMetricsChecker appMetricsQueueSourceChecker = AppMetricsChecker.create() .counter(APPS_SUBMITTED, 1) @@ -434,7 +519,7 @@ public void testTwoLevelWithUserMetrics() { .counter(APPS_SUBMITTED, 1) .checkAgainst(root.userSource, true); - leaf.queueMetrics.submitAppAttempt(USER); + leaf.queueMetrics.submitAppAttempt(USER, false); appMetricsQueueSourceChecker = 
AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker) .gaugeInt(APPS_PENDING, 1) @@ -489,7 +574,7 @@ public void testTwoLevelWithUserMetrics() { .gaugeLong(PENDING_MB, 15 * GB).gaugeInt(PENDING_V_CORES, 15) .gaugeInt(PENDING_CONTAINERS, 5).checkAgainst(root.userSource); - leaf.queueMetrics.runAppAttempt(app.getApplicationId(), USER); + leaf.queueMetrics.runAppAttempt(app.getApplicationId(), USER, false); appMetricsQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker) .gaugeInt(APPS_PENDING, 0) @@ -603,7 +688,7 @@ public void testTwoLevelWithUserMetrics() { .checkAgainst(root.userSource); leaf.queueMetrics.finishAppAttempt( - app.getApplicationId(), app.isPending(), app.getUser()); + app.getApplicationId(), app.isPending(), app.getUser(), false); appMetricsQueueSourceChecker = AppMetricsChecker .createFromChecker(appMetricsQueueSourceChecker) .counter(APPS_SUBMITTED, 1) @@ -627,7 +712,7 @@ public void testTwoLevelWithUserMetrics() { .gaugeInt(APPS_RUNNING, 0) .checkAgainst(root.userSource, true); - leaf.queueMetrics.finishApp(USER, RMAppState.FINISHED); + leaf.queueMetrics.finishApp(USER, RMAppState.FINISHED, false); AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker) .counter(APPS_COMPLETED, 1) .checkAgainst(leaf.queueSource, true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index 2a8f57956bada..65eed96c92520 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -126,7 +126,8 @@ public void testMove() { when(rmContext.getYarnConfiguration()).thenReturn(conf); SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId, user, oldQueue, oldQueue.getAbstractUsersManager(), rmContext); - oldMetrics.submitApp(user); + app.appSchedulingInfo.setUnmanagedAM(false); + oldMetrics.submitApp(user, false); // confirm that containerId is calculated based on epoch. 
assertEquals(0x30000000001L, app.getNewContainerId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 75ab16f3a8460..f8de445bd6867 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -60,6 +60,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; @@ -79,6 +80,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; @@ -131,6 +133,7 @@ public class TestLeafQueue { CapacityScheduler cs; CapacitySchedulerConfiguration csConf; CapacitySchedulerContext csContext; + private RMApp rmApp; CSQueue root; private CSQueueStore queues; @@ -174,7 +177,7 @@ private void setUpInternal(ResourceCalculator rC, boolean withNodeLabels) ConcurrentMap spyApps = spy(new ConcurrentHashMap()); - RMApp rmApp = mock(RMApp.class); + rmApp = mock(RMApp.class); when(rmApp.getRMAppAttempt(any())).thenReturn(null); amResourceRequest = mock(ResourceRequest.class); when(amResourceRequest.getCapability()).thenReturn( @@ -466,7 +469,12 @@ public void testPolicyConfiguration() throws Exception { public void testAppAttemptMetrics() throws Exception { CSMaxRunningAppsEnforcer enforcer = mock(CSMaxRunningAppsEnforcer.class); cs.setMaxRunningAppsEnforcer(enforcer); - + ApplicationSubmissionContext applicationSubmissionContext = + mock(ApplicationSubmissionContext.class); + when(applicationSubmissionContext.getUnmanagedAM()).thenReturn(false); + when(rmApp.getApplicationSubmissionContext()) + .thenReturn(applicationSubmissionContext); + when(rmApp.getCurrentAppAttempt()).thenReturn(mock(RMAppAttempt.class)); // Manipulate queue 'a' LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B)); @@ -495,17 +503,18 @@ public void testAppAttemptMetrics() throws Exception { // Attempt the same application again final ApplicationAttemptId appAttemptId_1 = TestUtils .getMockApplicationAttemptId(0, 2); - FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, null, - spyRMContext); - app_1.setAMResource(Resource.newInstance(100, 1)); - a.submitApplicationAttempt(app_1, user_0); // same user + FiCaSchedulerApp app1 = 
new FiCaSchedulerApp(appAttemptId_1, user_0, a, + null, spyRMContext); + app1.getAppSchedulingInfo().setUnmanagedAM(false); + app1.setAMResource(Resource.newInstance(100, 1)); + a.submitApplicationAttempt(app1, user_0); // same user assertEquals(1, a.getMetrics().getAppsSubmitted()); assertEquals(1, a.getMetrics().getAppsPending()); assertEquals(1, a.getUser(user_0).getActiveApplications()); - assertEquals(app_1.getAMResource().getMemorySize(), a.getMetrics() + assertEquals(app1.getAMResource().getMemorySize(), a.getMetrics() .getUsedAMResourceMB()); - assertEquals(app_1.getAMResource().getVirtualCores(), a.getMetrics() + assertEquals(app1.getAMResource().getVirtualCores(), a.getMetrics() .getUsedAMResourceVCores()); event = new AppAttemptRemovedSchedulerEvent(appAttemptId_0, @@ -524,6 +533,74 @@ public void testAppAttemptMetrics() throws Exception { assertEquals(1, userMetrics.getAppsSubmitted()); } + @Test + public void testUnmanagedAppAttemptMetrics() throws Exception { + CSMaxRunningAppsEnforcer enforcer = mock(CSMaxRunningAppsEnforcer.class); + cs.setMaxRunningAppsEnforcer(enforcer); + // Manipulate queue 'a' + LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B)); + + // Users + final String user0 = "user_0"; + + // Submit applications + final ApplicationAttemptId appAttemptId0 = TestUtils + .getMockApplicationAttemptId(0, 1); + + ApplicationSubmissionContext applicationSubmissionContext = + ApplicationSubmissionContext.newInstance( + appAttemptId0.getApplicationId(), "test", a.getQueuePath(), + Priority.newInstance(0), null, true, true, + 2, null, "test"); + + AppAddedSchedulerEvent addAppEvent = + new AppAddedSchedulerEvent(user0, applicationSubmissionContext, false, + null); + cs.handle(addAppEvent); + AppAttemptAddedSchedulerEvent addAttemptEvent = + new AppAttemptAddedSchedulerEvent(appAttemptId0, false); + cs.handle(addAttemptEvent); + + AppAttemptRemovedSchedulerEvent event = new AppAttemptRemovedSchedulerEvent( + appAttemptId0, RMAppAttemptState.FAILED, false); + cs.handle(event); + + assertEquals(0, a.getMetrics().getUnmanagedAppsPending()); + assertEquals(0, a.getMetrics().getUnmanagedAppsFailed()); + + // Attempt the same application again + final ApplicationAttemptId appAttemptId1 = TestUtils + .getMockApplicationAttemptId(0, 2); + FiCaSchedulerApp app1 = new FiCaSchedulerApp(appAttemptId1, user0, a, + null, spyRMContext); + + app1.setAMResource(Resource.newInstance(100, 1)); + a.submitApplicationAttempt(app1, user0); // same user + + assertEquals(1, a.getMetrics().getUnmanagedAppsSubmitted()); + assertEquals(1, a.getMetrics().getUnmanagedAppsPending()); + assertEquals(1, a.getUser(user0).getActiveApplications()); + assertEquals(app1.getAMResource().getMemorySize(), a.getMetrics() + .getUsedAMResourceMB()); + assertEquals(app1.getAMResource().getVirtualCores(), a.getMetrics() + .getUsedAMResourceVCores()); + + event = new AppAttemptRemovedSchedulerEvent(appAttemptId0, + RMAppAttemptState.FINISHED, false); + cs.handle(event); + AppRemovedSchedulerEvent rEvent = new AppRemovedSchedulerEvent( + appAttemptId0.getApplicationId(), RMAppState.FINISHED); + cs.handle(rEvent); + + assertEquals(1, a.getMetrics().getUnmanagedAppsSubmitted()); + assertEquals(0, a.getMetrics().getUnmanagedAppsPending()); + assertEquals(0, a.getMetrics().getUnmanagedAppsFailed()); + assertEquals(1, a.getMetrics().getUnmanagedAppsCompleted()); + + QueueMetrics userMetrics = a.getMetrics().getUserMetrics(user0); + assertEquals(1, userMetrics.getUnmanagedAppsSubmitted()); + } + @Test public void 
testFairConfiguration() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 40650532ed2d4..7183f7f782f56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -796,7 +796,7 @@ public void testNodeUpdateBeforeAppAttemptInit() throws Exception { scheduler.handle(new NodeAddedSchedulerEvent(node)); ApplicationId appId = ApplicationId.newInstance(0, 1); - scheduler.addApplication(appId, "queue1", "user1", false); + scheduler.addApplication(appId, "queue1", "user1", false, false); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); try { From 465e425700579015191a64e3dc96a26a39b42643 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sat, 20 Mar 2021 09:07:44 +0530 Subject: [PATCH 124/666] HDFS-15904 : De-flake TestBalancer#testBalancerWithSortTopNodes() (#2785) Contributed by Viraj Jasani. Signed-off-by: Mingliang Liu Signed-off-by: Ayush Saxena --- .../hadoop/hdfs/server/balancer/Balancer.java | 24 +++++++++++++++--- .../hdfs/server/balancer/Dispatcher.java | 7 +----- .../hdfs/server/balancer/TestBalancer.java | 25 ++++++++++++++----- 3 files changed, 40 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 6734c977d7a9a..0024ba56913d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -634,10 +634,10 @@ void resetData(Configuration conf) { } static class Result { - final ExitStatus exitStatus; - final long bytesLeftToMove; - final long bytesBeingMoved; - final long bytesAlreadyMoved; + private final ExitStatus exitStatus; + private final long bytesLeftToMove; + private final long bytesBeingMoved; + private final long bytesAlreadyMoved; Result(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved, long bytesAlreadyMoved) { @@ -647,6 +647,22 @@ static class Result { this.bytesAlreadyMoved = bytesAlreadyMoved; } + public ExitStatus getExitStatus() { + return exitStatus; + } + + public long getBytesLeftToMove() { + return bytesLeftToMove; + } + + public long getBytesBeingMoved() { + return bytesBeingMoved; + } + + public long getBytesAlreadyMoved() { + return bytesAlreadyMoved; + } + void print(int iteration, NameNodeConnector nnc, PrintStream out) { out.printf("%-24s %10d %19s %18s %17s %s%n", DateFormat.getDateTimeInstance().format(new Date()), iteration, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index c34e6a3ca4a13..17f0d8f73b38c 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -1158,12 +1158,7 @@ public void executePendingMove(final PendingMove p) { p.proxySource.removePendingBlock(p); return; } - moveExecutor.execute(new Runnable() { - @Override - public void run() { - p.dispatch(); - } - }); + moveExecutor.execute(p::dispatch); } public boolean dispatchAndCheckContinue() throws InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index b94cebc8d492a..343faf671f382 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1024,14 +1024,14 @@ private static int runBalancer(Collection namenodes, // clean all lists b.resetData(conf); - if (r.exitStatus == ExitStatus.IN_PROGRESS) { + if (r.getExitStatus() == ExitStatus.IN_PROGRESS) { done = false; - } else if (r.exitStatus != ExitStatus.SUCCESS) { + } else if (r.getExitStatus() != ExitStatus.SUCCESS) { //must be an error statue, return. - return r.exitStatus.getExitCode(); + return r.getExitStatus().getExitCode(); } else { if (iteration > 0) { - assertTrue(r.bytesAlreadyMoved > 0); + assertTrue(r.getBytesAlreadyMoved() > 0); } } } @@ -1657,7 +1657,7 @@ public void testMaxIterationTime() throws Exception { // When a block move is not canceled in 2 seconds properly and then // a block is moved unexpectedly, IN_PROGRESS will be reported. assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.", - ExitStatus.NO_MOVE_PROGRESS, r.exitStatus); + ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus()); } } finally { for (NameNodeConnector nnc : connectors) { @@ -2297,7 +2297,20 @@ public void testBalancerWithSortTopNodes() throws Exception { maxUsage = Math.max(maxUsage, datanodeReport[i].getDfsUsed()); } - assertEquals(200, balancerResult.bytesAlreadyMoved); + // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file. + // The HDFS balancer will choose a block to move from this node randomly. More likely it will + // be 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY which is 99L, + // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B. + // However, chances are the first block selected to be moved from this 95% DN is the 50B block. + // After this block is moved, the total moved size so far would be 50B which is smaller than + // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block. + // The second block will always be of size 100B. So total bytes moved from this 95% DN will be + // 2 blocks of size (100B + 50B) 150B. 
+ // Hence, overall total blocks moved by HDFS balancer would be either of these 2 options: + // a) 2 blocks of total size (100B + 100B) + // b) 3 blocks of total size (50B + 100B + 100B) + assertTrue(balancerResult.getBytesAlreadyMoved() == 200 + || balancerResult.getBytesAlreadyMoved() == 250); // 100% and 95% used nodes will be balanced, so top used will be 900 assertEquals(900, maxUsage); } From eef149f2d33b57e6157ee61be5c1aafc36af4299 Mon Sep 17 00:00:00 2001 From: kwangsun Date: Mon, 22 Mar 2021 11:43:32 +0900 Subject: [PATCH 125/666] HADOOP-17952. Fix the wrong CIDR range example in Proxy User documentation. (#2780) Signed-off-by: Akira Ajisaka --- .../hadoop-common/src/site/markdown/Superusers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md b/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md index 678d56b123c0f..56a763ad08e88 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md @@ -80,7 +80,7 @@ If more lax security is preferred, the wildcard value \* may be used to allow im * -The `hadoop.proxyuser.$superuser.hosts` accepts list of ip addresses, ip address ranges in CIDR format and/or host names. For example, by specifying as below, user named `super` accessing from hosts in the range `10.222.0.0-15` and `10.113.221.221` can impersonate `user1` and `user2`. +The `hadoop.proxyuser.$superuser.hosts` accepts list of ip addresses, ip address ranges in CIDR format and/or host names. For example, by specifying as below, user named `super` accessing from hosts in the range `10.222.0.0-10.222.255.255` and `10.113.221.221` can impersonate `user1` and `user2`. hadoop.proxyuser.super.hosts From 4a2c88feae3609e402dc88fec7943d98153031cd Mon Sep 17 00:00:00 2001 From: Mingliang Liu Date: Sun, 21 Mar 2021 21:12:27 -0700 Subject: [PATCH 126/666] YARN-10706. Upgrade com.github.eirslett:frontend-maven-plugin to 1.11.2 (#2791) Contributed by Mingliang Liu. Signed-off-by: Ayush Saxena Signed-off-by: Akira Ajisaka --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 6a0813f906830..90a7c4420f8e5 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -185,7 +185,7 @@ 900 1.11.901 2.3.4 - 1.6 + 1.11.2 2.1 0.7 1.5.1 From e51a11146c10055c018a69c95da6304dc4de4819 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Mon, 22 Mar 2021 18:37:00 +0800 Subject: [PATCH 127/666] HDFS-15787. Remove unnecessary Lease Renew in FSNamesystem#internalReleaseLease. Contributed by Lisheng Sun. 
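In other words, the explicit renew is kept only for the case where internalReleaseLease() did not
hand the lease to a recovery holder; the reassignment itself already refreshes the lease under the
new holder, so renewing it again is redundant. The shape of the fix, with names taken from the hunk
below:

    lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
    if (recoveryLeaseHolder == null) {
      // Only a lease that was not reassigned still needs an explicit renewal.
      leaseManager.renewLease(lease);
    }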
Signed-off-by: Ayush Saxena Signed-off-by: He Xiaoqiao --- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index c6902b8ea213c..7d9f78c0647fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3786,7 +3786,9 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, "RecoveryId = " + blockRecoveryId + " for block " + lastBlock); } lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile); - leaseManager.renewLease(lease); + if (recoveryLeaseHolder == null) { + leaseManager.renewLease(lease); + } break; } return false; From 7bad01d86cacf10e91fbbf3234c1e97ce1f3aaab Mon Sep 17 00:00:00 2001 From: litao Date: Mon, 22 Mar 2021 19:29:17 +0800 Subject: [PATCH 128/666] HDFS-15906. Close FSImage and FSNamesystem after formatting is complete (#2788) --- .../apache/hadoop/hdfs/server/namenode/NameNode.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 54671ea6cf4ae..55196c4d44f03 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1245,8 +1245,9 @@ private static boolean format(Configuration conf, boolean force, LOG.info("Formatting using clusterid: {}", clusterId); FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat); + FSNamesystem fsn = null; try { - FSNamesystem fsn = new FSNamesystem(conf, fsImage); + fsn = new FSNamesystem(conf, fsImage); fsImage.getEditLog().initJournalsForWrite(); // Abort NameNode format if reformat is disabled and if @@ -1271,8 +1272,14 @@ private static boolean format(Configuration conf, boolean force, fsImage.format(fsn, clusterId, force); } catch (IOException ioe) { LOG.warn("Encountered exception during format", ioe); - fsImage.close(); throw ioe; + } finally { + if (fsImage != null) { + fsImage.close(); + } + if (fsn != null) { + fsn.close(); + } } return false; } From 1b8cba518ea3025f4b2fdc732f3138a59be971e5 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Mon, 22 Mar 2021 21:10:54 +0530 Subject: [PATCH 129/666] HDFS-15903. 
Refactor X-Platform lib (#2783) --- .../src/main/native/libhdfspp/CMakeLists.txt | 2 +- .../libhdfspp/examples/c/connect_cancel/CMakeLists.txt | 2 +- .../libhdfspp/examples/c/connect_cancel/connect_cancel.c | 2 +- .../libhdfspp/examples/cc/connect_cancel/CMakeLists.txt | 2 +- .../src/main/native/libhdfspp/lib/bindings/c/CMakeLists.txt | 6 +++--- .../src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt | 4 ++-- .../libhdfspp/lib/x-platform/{c_api.cc => c-api/syscall.cc} | 2 +- .../libhdfspp/lib/x-platform/{c_api.h => c-api/syscall.h} | 6 +++--- .../src/main/native/libhdfspp/tests/CMakeLists.txt | 6 +++--- .../main/native/libhdfspp/tests/x-platform/CMakeLists.txt | 4 ++-- 10 files changed, 18 insertions(+), 18 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/{c_api.cc => c-api/syscall.cc} (96%) rename hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/{c_api.h => c-api/syscall.h} (83%) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt index 5125f57ed0108..b03ba68172228 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt @@ -278,7 +278,7 @@ if(NEED_LINK_DL) endif() set(LIBHDFSPP_VERSION "0.1.0") -set(LIBHDFSPP_ALL_OBJECTS $ $ $ $ $ $ $ $ $) +set(LIBHDFSPP_ALL_OBJECTS $ $ $ $ $ $ $ $ $) if (HADOOP_BUILD) hadoop_add_dual_library(hdfspp ${EMPTY_FILE_CC} ${LIBHDFSPP_ALL_OBJECTS}) hadoop_target_link_dual_libraries(hdfspp diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt index f645254d6b8fb..89e7e6cdc25bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/CMakeLists.txt @@ -23,6 +23,6 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX}) include_directories( ${LIBHDFSPP_DIR}/include ) link_directories( ${LIBHDFSPP_DIR}/lib ) -add_executable(connect_cancel_c $ connect_cancel.c) +add_executable(connect_cancel_c $ connect_cancel.c) target_link_libraries(connect_cancel_c hdfspp_static uriparser2) target_include_directories(connect_cancel_c PRIVATE ../../lib) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c index 6f1bbfe84e0c3..575f79218f4ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/c/connect_cancel/connect_cancel.c @@ -29,7 +29,7 @@ #include "hdfspp/hdfs_ext.h" #include "common/util_c.h" -#include "x-platform/c_api.h" +#include "x-platform/c-api/syscall.h" #define ERROR_BUFFER_SIZE 1024 diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt index c4bc7994c179c..8451d402d4d18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/examples/cc/connect_cancel/CMakeLists.txt @@ -23,6 +23,6 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX}) include_directories( ${LIBHDFSPP_DIR}/include ) link_directories( ${LIBHDFSPP_DIR}/lib ) -add_executable(connect_cancel $ connect_cancel.cc) +add_executable(connect_cancel $ connect_cancel.cc) target_link_libraries(connect_cancel hdfspp_static) target_include_directories(connect_cancel PRIVATE ../../lib) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/CMakeLists.txt index ff8d6b04465d9..560373f8d7dc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/CMakeLists.txt @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. -add_library(bindings_c_obj OBJECT $ hdfs.cc) -add_dependencies(bindings_c_obj fs rpc reader proto common fs rpc reader proto common x_platform_utils_obj) +add_library(bindings_c_obj OBJECT $ hdfs.cc) +add_dependencies(bindings_c_obj fs rpc reader proto common fs rpc reader proto common x_platform_obj) -add_library(bindings_c $ $) +add_library(bindings_c $ $) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt index 61deb317ff738..77d0216d4877a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/CMakeLists.txt @@ -22,5 +22,5 @@ else() set(SYSCALL_SRC syscall_linux.cc) endif() -add_library(x_platform_utils_obj OBJECT ${SYSCALL_SRC} utils.cc) -add_library(x_platform_utils_obj_c_api OBJECT $ c_api.cc) +add_library(x_platform_obj OBJECT ${SYSCALL_SRC} utils.cc) +add_library(x_platform_obj_c_api OBJECT $ c-api/syscall.cc) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc index b905d2fbb07fb..cca9f6a024807 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc @@ -16,7 +16,7 @@ * limitations under the License. */ -#include "syscall.h" +#include "x-platform/syscall.h" extern "C" int x_platform_syscall_write_to_stdout(const char* msg) { return XPlatform::Syscall::WriteToStdout(msg) ? 
1 : 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h similarity index 83% rename from hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h rename to hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h index 4bc7008cd9a2e..be905ae1d364e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c_api.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h @@ -16,8 +16,8 @@ * limitations under the License. */ -#ifndef NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_H -#define NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_H +#ifndef NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_SYSCALL_H +#define NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_SYSCALL_H /** * C APIs for accessing XPlatform @@ -25,4 +25,4 @@ int x_platform_syscall_write_to_stdout(const char* msg); -#endif // NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_H +#endif // NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_SYSCALL_H diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt index 0d877a2c4558f..32f75f474b8d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt @@ -112,7 +112,7 @@ add_executable(hdfs_builder_test hdfs_builder_test.cc) target_link_libraries(hdfs_builder_test test_common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_builder_test hdfs_builder_test) -add_executable(logging_test logging_test.cc $) +add_executable(logging_test logging_test.cc $) target_link_libraries(logging_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(logging_test logging_test) @@ -144,10 +144,10 @@ include_directories ( ${CMAKE_CURRENT_SOURCE_DIR}/../../libhdfs-tests/ ) -add_library(hdfspp_test_shim_static STATIC $ hdfs_shim.c libhdfs_wrapper.c libhdfspp_wrapper.cc ${LIBHDFSPP_BINDING_C}/hdfs.cc) +add_library(hdfspp_test_shim_static STATIC $ hdfs_shim.c libhdfs_wrapper.c libhdfspp_wrapper.cc ${LIBHDFSPP_BINDING_C}/hdfs.cc) add_dependencies(hdfspp_test_shim_static proto) -add_library(hdfspp_test_static STATIC $ ${LIBHDFSPP_BINDING_C}/hdfs.cc) +add_library(hdfspp_test_static STATIC $ ${LIBHDFSPP_BINDING_C}/hdfs.cc) add_dependencies(hdfspp_test_static proto) # TODO: get all of the mini dfs library bits here in one place diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt index 5a949387d687c..ac9f8fbd8cade 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt @@ -17,9 +17,9 @@ # if(WIN32) - 
add_executable(x_platform_utils_test $ utils_common_test.cc utils_test_main.cc utils_win_test.cc) + add_executable(x_platform_utils_test $ utils_common_test.cc utils_test_main.cc utils_win_test.cc) else(WIN32) - add_executable(x_platform_utils_test $ utils_common_test.cc utils_test_main.cc utils_nix_test.cc) + add_executable(x_platform_utils_test $ utils_common_test.cc utils_test_main.cc utils_nix_test.cc) endif(WIN32) target_include_directories(x_platform_utils_test PRIVATE ${LIBHDFSPP_LIB_DIR}) target_link_libraries(x_platform_utils_test gmock_main) From 58df5c476c5b9a74f23f909e2f43dfa1cbadf87c Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Mon, 22 Mar 2021 18:41:25 +0000 Subject: [PATCH 130/666] MAPREDUCE-7325. Intermediate data encryption is broken in LocalJobRunner. Contributed by Ahmed Hussein --- .../mapreduce/task/reduce/LocalFetcher.java | 2 +- .../TestMRIntermediateDataEncryption.java | 216 ++++++++++++++---- 2 files changed, 170 insertions(+), 48 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index 3ae1e746fcb9f..dc563eeab4d0f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -153,10 +153,10 @@ private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException { FileSystem localFs = FileSystem.getLocal(job).getRaw(); FSDataInputStream inStream = localFs.open(mapOutputFileName); try { + inStream.seek(ir.startOffset); inStream = IntermediateEncryptedStream.wrapIfNecessary(job, inStream, mapOutputFileName); - inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job)); mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter); } finally { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java index 79fcd4110ca27..fbee7ef5c0f47 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java @@ -44,6 +44,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -75,9 +76,20 @@ * mbs-per-map specifies the amount of data (in MBs) to generate per map. * By default, this is twice the value of mapreduce.task.io.sort.mb * map-tasks specifies the number of map tasks to run. + * Steps of the unit test: + * 1- Generating random input text. + * 2- Run a job with encryption disabled. Get the checksum of the output file + * checkSumReference. 
+ * 3- Run the job with encryption enabled. + * 4- Compare checkSumReference to the checksum of the job output. + * 5- If the job has multiple reducers, the test launches one final job to + * combine the output files into a single one. + * 6- Verify that the maps spilled files. */ @RunWith(Parameterized.class) public class TestMRIntermediateDataEncryption { + public static final Logger LOG = + LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class); /** * The number of bytes generated by the input generator. */ @@ -86,8 +98,6 @@ public class TestMRIntermediateDataEncryption { public static final int INPUT_GEN_NUM_THREADS = 16; public static final long TASK_SORT_IO_MB_DEFAULT = 128L; public static final String JOB_DIR_PATH = "jobs-data-path"; - private static final Logger LOG = - LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class); /** * Directory of the test data. */ @@ -97,6 +107,7 @@ public class TestMRIntermediateDataEncryption { private static MiniDFSCluster dfsCluster; private static MiniMRClientCluster mrCluster; private static FileSystem fs; + private static FileChecksum checkSumReference; private static Path jobInputDirPath; private static long inputFileSize; /** @@ -136,11 +147,7 @@ public static Collection getTestParameters() { {"testSingleReducer", 3, 1, false}, {"testUberMode", 3, 1, true}, {"testMultipleMapsPerNode", 8, 1, false}, - // TODO: The following configuration is commented out until - // MAPREDUCE-7325 is fixed. - // Setting multiple reducers breaks LocalJobRunner causing the - // unit test to fail. - // {"testMultipleReducers", 2, 4, false} + {"testMultipleReducers", 2, 4, false} }); } @@ -171,6 +178,8 @@ public static void setupClass() throws Exception { // run the input generator job. Assert.assertEquals("Generating input should succeed", 0, generateInputTextFile()); + // run the reference job + runReferenceJob(); } @AfterClass @@ -185,7 +194,7 @@ public static void tearDown() throws IOException { // make sure that generated input file is deleted final File textInputFile = new File(testRootDir, "input.txt"); if (textInputFile.exists()) { - textInputFile.delete(); + Assert.assertTrue(textInputFile.delete()); } } @@ -198,7 +207,7 @@ private static Configuration createBaseConfiguration() { // Set the jvm arguments to enable intermediate encryption. Configuration conf = MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); - // Set the temp directories a subdir of the test directory. + // Set the temp directories a subDir of the test directory. conf = MRJobConfUtil.setLocalDirectoriesConfigForTesting(conf, testRootDir); conf.setLong("dfs.blocksize", BLOCK_SIZE_DEFAULT); return conf; @@ -207,7 +216,7 @@ private static Configuration createBaseConfiguration() { /** * Creates a thread safe BufferedWriter to be used among the task generators. * @return A synchronized BufferedWriter to the input file. - * @throws IOException + * @throws IOException opening a new {@link FileWriter}. */ private static synchronized BufferedWriter getTextInputWriter() throws IOException { @@ -223,7 +232,7 @@ private static synchronized BufferedWriter getTextInputWriter() * It creates a total INPUT_GEN_NUM_THREADS future tasks. * * @return the result of the input generation. 0 for success. - * @throws Exception + * @throws Exception during the I/O of job. 
*/ private static int generateInputTextFile() throws Exception { final File textInputFile = new File(testRootDir, "input.txt"); @@ -270,6 +279,118 @@ private static int generateInputTextFile() throws Exception { return 0; } + /** + * Runs a WordCount job with encryption disabled and stores the checksum of + * the output file. + * @throws Exception due to I/O errors. + */ + private static void runReferenceJob() throws Exception { + final String jobRefLabel = "job-reference"; + final Path jobRefDirPath = new Path(JOB_DIR_PATH, jobRefLabel); + if (fs.exists(jobRefDirPath) && !fs.delete(jobRefDirPath, true)) { + throw new IOException("Could not delete " + jobRefDirPath); + } + Assert.assertTrue(fs.mkdirs(jobRefDirPath)); + Path jobRefOutputPath = new Path(jobRefDirPath, "out-dir"); + Configuration referenceConf = new Configuration(commonConfig); + referenceConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, false); + Job jobReference = runWordCountJob(jobRefLabel, jobRefOutputPath, + referenceConf, 4, 1); + Assert.assertTrue(jobReference.isSuccessful()); + FileStatus[] fileStatusArr = + fs.listStatus(jobRefOutputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + Assert.assertEquals(1, fileStatusArr.length); + checkSumReference = fs.getFileChecksum(fileStatusArr[0].getPath()); + Assert.assertTrue(fs.delete(jobRefDirPath, true)); + } + + private static Job runWordCountJob(String postfixName, Path jOutputPath, + Configuration jConf, int mappers, int reducers) throws Exception { + Job job = Job.getInstance(jConf); + job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, mappers); + job.setJarByClass(TestMRIntermediateDataEncryption.class); + job.setJobName("mr-spill-" + postfixName); + // Mapper configuration + job.setMapperClass(TokenizerMapper.class); + job.setInputFormatClass(TextInputFormat.class); + job.setCombinerClass(LongSumReducer.class); + FileInputFormat.setMinInputSplitSize(job, + (inputFileSize + mappers) / mappers); + // Reducer configuration + job.setReducerClass(LongSumReducer.class); + job.setNumReduceTasks(reducers); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(LongWritable.class); + // Set the IO paths for the job. + FileInputFormat.addInputPath(job, jobInputDirPath); + FileOutputFormat.setOutputPath(job, jOutputPath); + if (job.waitForCompletion(true)) { + FileStatus[] fileStatusArr = + fs.listStatus(jOutputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + for (FileStatus fStatus : fileStatusArr) { + LOG.info("Job: {} .. Output file {} .. Size = {}", + postfixName, fStatus.getPath(), fStatus.getLen()); + } + } + return job; + } + + /** + * Compares the checksum of the output file to the + * checkSumReference. + * If the job has a multiple reducers, the output files are combined by + * launching another job. + * @return true if the checksums are equal. + * @throws Exception if the output is missing or the combiner job fails. 
+ */ + private boolean validateJobOutput() throws Exception { + Assert.assertTrue("Job Output path [" + jobOutputPath + "] should exist", + fs.exists(jobOutputPath)); + Path outputPath = jobOutputPath; + if (numReducers != 1) { + // combine the result into one file by running a combiner job + final String jobRefLabel = testTitleName + "-combine"; + final Path jobRefDirPath = new Path(JOB_DIR_PATH, jobRefLabel); + if (fs.exists(jobRefDirPath) && !fs.delete(jobRefDirPath, true)) { + throw new IOException("Could not delete " + jobRefDirPath); + } + fs.mkdirs(jobRefDirPath); + outputPath = new Path(jobRefDirPath, "out-dir"); + Configuration referenceConf = new Configuration(commonConfig); + referenceConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, + false); + Job combinerJob = Job.getInstance(referenceConf); + combinerJob.setJarByClass(TestMRIntermediateDataEncryption.class); + combinerJob.setJobName("mr-spill-" + jobRefLabel); + combinerJob.setMapperClass(CombinerJobMapper.class); + FileInputFormat.addInputPath(combinerJob, jobOutputPath); + // Reducer configuration + combinerJob.setReducerClass(LongSumReducer.class); + combinerJob.setNumReduceTasks(1); + combinerJob.setOutputKeyClass(Text.class); + combinerJob.setOutputValueClass(LongWritable.class); + // Set the IO paths for the job. + FileOutputFormat.setOutputPath(combinerJob, outputPath); + if (!combinerJob.waitForCompletion(true)) { + return false; + } + FileStatus[] fileStatusArr = + fs.listStatus(outputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + LOG.info("Job-Combination: {} .. Output file {} .. Size = {}", + jobRefDirPath, fileStatusArr[0].getPath(), fileStatusArr[0].getLen()); + } + // Get the output files of the job. + FileStatus[] fileStatusArr = + fs.listStatus(outputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + FileChecksum jobFileChecksum = + fs.getFileChecksum(fileStatusArr[0].getPath()); + return checkSumReference.equals(jobFileChecksum); + } + @Before public void setup() throws Exception { LOG.info("Starting TestMRIntermediateDataEncryption#{}.......", @@ -284,16 +405,16 @@ public void setup() throws Exception { config = new Configuration(commonConfig); config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, isUber); config.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0F); - // set the configuration to make sure that we get spilled files + // Set the configuration to make sure that we get spilled files. long ioSortMb = TASK_SORT_IO_MB_DEFAULT; config.setLong(MRJobConfig.IO_SORT_MB, ioSortMb); long mapMb = Math.max(2 * ioSortMb, config.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB)); - // make sure the map tasks will spill to disk. + // Make sure the map tasks will spill to disk. config.setLong(MRJobConfig.MAP_MEMORY_MB, mapMb); config.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx" + (mapMb - 200) + "m"); config.setInt(MRJobConfig.NUM_MAPS, numMappers); - // max attempts have to be set to 1 when intermediate encryption is enabled. + // Max attempts have to be set to 1 when intermediate encryption is enabled. 
config.setInt("mapreduce.map.maxattempts", 1); config.setInt("mapreduce.reduce.maxattempts", 1); } @@ -302,24 +423,6 @@ public void setup() throws Exception { public void testWordCount() throws Exception { LOG.info("........Starting main Job Driver #{} starting at {}.......", testTitleName, Time.formatTime(System.currentTimeMillis())); - Job job = Job.getInstance(config); - job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, numMappers); - job.setJarByClass(TestMRIntermediateDataEncryption.class); - job.setJobName("mr-spill-" + testTitleName); - // Mapper configuration - job.setMapperClass(TokenizerMapper.class); - job.setInputFormatClass(TextInputFormat.class); - job.setCombinerClass(LongSumReducer.class); - FileInputFormat.setMinInputSplitSize(job, - (inputFileSize + numMappers) / numMappers); - // Reducer configuration - job.setReducerClass(LongSumReducer.class); - job.setNumReduceTasks(numReducers); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(LongWritable.class); - // Set the IO paths for the job. - FileInputFormat.addInputPath(job, jobInputDirPath); - FileOutputFormat.setOutputPath(job, jobOutputPath); SpillCallBackPathsFinder spillInjector = (SpillCallBackPathsFinder) IntermediateEncryptedStream .setSpillCBInjector(new SpillCallBackPathsFinder()); @@ -328,34 +431,36 @@ public void testWordCount() throws Exception { testTitleName)); try { long startTime = Time.monotonicNow(); - testSummary.append(String.format("%nJob %s ended at %s", + testSummary.append(String.format("%nJob %s started at %s", testTitleName, Time.formatTime(System.currentTimeMillis()))); - Assert.assertTrue(job.waitForCompletion(true)); + Job job = runWordCountJob(testTitleName, jobOutputPath, config, + numMappers, numReducers); + Assert.assertTrue(job.isSuccessful()); long endTime = Time.monotonicNow(); testSummary.append(String.format("%nJob %s ended at %s", job.getJobName(), Time.formatTime(System.currentTimeMillis()))); testSummary.append(String.format("%n\tThe job took %.3f seconds", (1.0 * (endTime - startTime)) / 1000)); - long spilledRecords = - job.getCounters().findCounter(TaskCounter.SPILLED_RECORDS).getValue(); - Assert.assertFalse( - "The encrypted spilled files should not be empty.", - spillInjector.getEncryptedSpilledFiles().isEmpty()); - Assert.assertTrue("Spill records must be greater than 0", - spilledRecords > 0); - Assert.assertTrue("Job Output path [" + jobOutputPath + "] should exist", - fs.exists(jobOutputPath)); - Assert.assertTrue("Invalid access to spill file positions", - spillInjector.getInvalidSpillEntries().isEmpty()); - FileStatus[] fileStatus = + FileStatus[] fileStatusArr = fs.listStatus(jobOutputPath, new Utils.OutputFileUtils.OutputFilesFilter()); - for (FileStatus fStatus : fileStatus) { + for (FileStatus fStatus : fileStatusArr) { long fileSize = fStatus.getLen(); testSummary.append( String.format("%n\tOutput file %s: %d", fStatus.getPath(), fileSize)); } + // Validate the checksum of the output. + Assert.assertTrue(validateJobOutput()); + // Check intermediate files and spilling. 
+ long spilledRecords = + job.getCounters().findCounter(TaskCounter.SPILLED_RECORDS).getValue(); + Assert.assertTrue("Spill records must be greater than 0", + spilledRecords > 0); + Assert.assertFalse("The encrypted spilled files should not be empty.", + spillInjector.getEncryptedSpilledFiles().isEmpty()); + Assert.assertTrue("Invalid access to spill file positions", + spillInjector.getInvalidSpillEntries().isEmpty()); } finally { testSummary.append(spillInjector.getSpilledFileReport()); LOG.info(testSummary.toString()); @@ -408,4 +513,21 @@ public void map(Object key, Text value, } } } + + /** + * A Mapper that reads the output of WordCount passing it to the reducer. + * It is used to combine the output of multiple reducer jobs. + */ + public static class CombinerJobMapper + extends Mapper { + private final LongWritable sum = new LongWritable(0); + private final Text word = new Text(); + public void map(Object key, Text value, + Context context) throws IOException, InterruptedException { + String[] line = value.toString().split("\\s+"); + sum.set(Long.parseLong(line[1])); + word.set(line[0]); + context.write(word, sum); + } + } } From 47ebf5755ef4a05a2aeb890a0019499ae1cadc5c Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Mon, 22 Mar 2021 21:53:45 -0700 Subject: [PATCH 131/666] HADOOP-17598. Fix java doc issue introduced by HADOOP-17578. (#2802). Contributed by Xiaoyu Yao. Signed-off-by: Akira Ajisaka Signed-off-by: Ayush Saxena --- .../java/org/apache/hadoop/security/UserGroupInformation.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index c3d7a55a10e6e..cc32dae170c6c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -1920,7 +1920,6 @@ public T doAs(PrivilegedExceptionAction action /** * Log current UGI and token information into specified log. * @param ugi - UGI - * @throws IOException */ @InterfaceAudience.LimitedPrivate({"HDFS", "KMS"}) @InterfaceStability.Unstable From c40135fc52509d628d244a6c395bf8af2af62d2c Mon Sep 17 00:00:00 2001 From: Narges Shadab <54193416+Nargeshdb@users.noreply.github.com> Date: Tue, 23 Mar 2021 04:36:58 -0700 Subject: [PATCH 132/666] HDFS-15908. Possible Resource Leak in org.apache.hadoop.hdfs.qjournal.server.Journal (#2790). Contributed by Narges Shadab. 
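The ordering is what matters here: with storage.close() first, an IOException from it skipped the
two stream closes entirely and leaked them. Closing the streams first via IOUtils.closeStream(),
which ignores per-stream close failures rather than propagating them, and leaving the
potentially-throwing storage.close() for last removes that window. A minimal sketch of the
resulting method, names taken from the patch:

    @Override // Closeable
    public void close() throws IOException {
      // Quiet closes first; closeStream() does not propagate failures.
      IOUtils.closeStream(committedTxnId);
      IOUtils.closeStream(curSegment);
      // The close that may throw goes last, after the streams are released.
      storage.close();
    }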
Signed-off-by: Ayush Saxena --- .../java/org/apache/hadoop/hdfs/qjournal/server/Journal.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 99a087e2da5eb..238a000afcfb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -264,9 +264,9 @@ void format(NamespaceInfo nsInfo, boolean force) throws IOException { */ @Override // Closeable public void close() throws IOException { - storage.close(); IOUtils.closeStream(committedTxnId); IOUtils.closeStream(curSegment); + storage.close(); } JNStorage getStorage() { From 9a589f234bf862f57ad99f4f916b06c134582455 Mon Sep 17 00:00:00 2001 From: Jack Jiang <81204396+TJJack@users.noreply.github.com> Date: Tue, 23 Mar 2021 23:46:11 +0800 Subject: [PATCH 133/666] HADOOP-17599. Remove NULL checks before instanceof (#2804) --- .../main/java/org/apache/hadoop/fs/AbstractFileSystem.java | 2 +- .../java/org/apache/hadoop/fs/DelegationTokenRenewer.java | 2 +- .../src/main/java/org/apache/hadoop/fs/FileChecksum.java | 2 +- .../src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java | 2 +- .../org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java | 3 +-- .../api/protocolrecords/GetAllResourceProfilesResponse.java | 2 +- .../api/protocolrecords/GetAllResourceTypeInfoResponse.java | 2 +- .../yarn/api/protocolrecords/GetResourceProfileRequest.java | 2 +- .../yarn/api/protocolrecords/GetResourceProfileResponse.java | 2 +- .../hadoop/yarn/api/records/impl/LightWeightResource.java | 2 +- .../hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java | 2 +- .../org/apache/hadoop/yarn/server/volume/csi/VolumeId.java | 2 +- .../linux/privileged/PrivilegedOperation.java | 2 +- .../resourceplugin/deviceframework/AssignedDevice.java | 4 ++-- .../resourceplugin/gpu/AssignedGpuDevice.java | 4 ++-- .../containermanager/resourceplugin/gpu/GpuDevice.java | 4 ++-- .../containermanager/runtime/ContainerRuntimeContext.java | 2 +- 17 files changed, 20 insertions(+), 21 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 5796f23026200..d50cf1e03accc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -1353,7 +1353,7 @@ public int hashCode() { @Override //Object public boolean equals(Object other) { - if (other == null || !(other instanceof AbstractFileSystem)) { + if (!(other instanceof AbstractFileSystem)) { return false; } return myUri.equals(((AbstractFileSystem) other).myUri); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 19c29cfa3da12..38a7344d4f9ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -97,7 +97,7 @@ public int hashCode() { 
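// The same one-line simplification is applied in every hunk of this change (and in the
// HDFS-15913 follow-up further below): the explicit null test is redundant because the
// instanceof operator itself evaluates to false for a null operand. A tiny standalone
// illustration of the language rule (not taken from the patch):
//
//   Object o = null;
//   boolean withNullCheck    = (o == null || !(o instanceof String));  // true
//   boolean withoutNullCheck = !(o instanceof String);                 // also true
//
// so dropping "obj == null ||" leaves the behaviour of these equals()/compareTo()
// methods unchanged.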
public boolean equals(final Object that) { if (this == that) { return true; - } else if (that == null || !(that instanceof RenewAction)) { + } else if (!(that instanceof RenewAction)) { return false; } return token.equals(((RenewAction)that).token); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java index 62f1a9b3f486e..6822fa485622f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java @@ -47,7 +47,7 @@ public boolean equals(Object other) { if (other == this) { return true; } - if (other == null || !(other instanceof FileChecksum)) { + if (!(other instanceof FileChecksum)) { return false; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java index 8632a387c6032..d6bd492042d36 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java @@ -93,7 +93,7 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || !(obj instanceof ClientRequest)) { + if (!(obj instanceof ClientRequest)) { return false; } ClientRequest other = (ClientRequest) obj; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java index 1f5072d58eda3..d6f26d702c885 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java @@ -188,8 +188,7 @@ public boolean equals(Object obj) { return true; } - if (obj == null - || !(obj instanceof AuthorizationComponent)) { + if (!(obj instanceof AuthorizationComponent)) { return false; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java index 547770890d534..d6852f1d07615 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java @@ -45,7 +45,7 @@ public boolean equals(Object other) { if (this == other) { return true; } - if (other == null || !(other instanceof GetAllResourceProfilesResponse)) { + if (!(other instanceof GetAllResourceProfilesResponse)) { return false; } return ((GetAllResourceProfilesResponse) other).getResourceProfiles() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java index b57b96df3fd6e..dca9fe5e238e2 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java @@ -45,7 +45,7 @@ public boolean equals(Object other) { if (this == other) { return true; } - if (other == null || !(other instanceof GetAllResourceTypeInfoResponse)) { + if (!(other instanceof GetAllResourceTypeInfoResponse)) { return false; } return ((GetAllResourceTypeInfoResponse) other).getResourceTypeInfo() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java index 3655be946da94..2588bb9006c07 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java @@ -45,7 +45,7 @@ public boolean equals(Object other) { if (this == other) { return true; } - if (other == null || !(other instanceof GetResourceProfileRequest)) { + if (!(other instanceof GetResourceProfileRequest)) { return false; } return this.getProfileName() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java index a010644a7929a..7aa20c51bc085 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java @@ -54,7 +54,7 @@ public boolean equals(Object other) { return true; } - if (other == null || !(other instanceof GetResourceProfileResponse)) { + if (!(other instanceof GetResourceProfileResponse)) { return false; } return this.getResource() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java index 02afe50cc233d..8172128607aea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java @@ -139,7 +139,7 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || !(obj instanceof Resource)) { + if (!(obj instanceof Resource)) { return false; } Resource other = (Resource) obj; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java index 9f5de8c1517ec..fffc8a857cb3b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java @@ -68,7 +68,7 @@ public int compareTo(PurgeElement e) { @Override public boolean equals(Object e) { - if (e == null || !(e instanceof PurgeElement)) { + if (!(e instanceof PurgeElement)) { return false; } if (e == this) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeId.java index 8acc95e4d9af6..54870385ffcb8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeId.java @@ -43,7 +43,7 @@ public String toString() { @Override public boolean equals(Object obj) { - if (obj == null || !(obj instanceof VolumeId)) { + if (!(obj instanceof VolumeId)) { return false; } return StringUtils.equalsIgnoreCase(volumeId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java index 979f4821b24df..e50d17d5c7b6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java @@ -135,7 +135,7 @@ public List getArguments() { @Override public boolean equals(Object other) { - if (other == null || !(other instanceof PrivilegedOperation)) { + if (!(other instanceof PrivilegedOperation)) { return false; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/AssignedDevice.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/AssignedDevice.java index cad95ab9dc4e1..e5ae1a2dc9596 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/AssignedDevice.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/AssignedDevice.java @@ -49,7 +49,7 @@ public String getContainerId() { @Override public int compareTo(Object o) { - if (o == null || !(o instanceof AssignedDevice)) { + if (!(o instanceof AssignedDevice)) { 
return -1; } AssignedDevice other = (AssignedDevice) o; @@ -62,7 +62,7 @@ public int compareTo(Object o) { @Override public boolean equals(Object o) { - if (o == null || !(o instanceof AssignedDevice)) { + if (!(o instanceof AssignedDevice)) { return false; } AssignedDevice other = (AssignedDevice) o; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/AssignedGpuDevice.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/AssignedGpuDevice.java index 26fd9050742d5..e16423fd984d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/AssignedGpuDevice.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/AssignedGpuDevice.java @@ -44,7 +44,7 @@ public void setContainerId(String containerId) { @Override public boolean equals(Object obj) { - if (obj == null || !(obj instanceof AssignedGpuDevice)) { + if (!(obj instanceof AssignedGpuDevice)) { return false; } AssignedGpuDevice other = (AssignedGpuDevice) obj; @@ -54,7 +54,7 @@ public boolean equals(Object obj) { @Override public int compareTo(Object obj) { - if (obj == null || (!(obj instanceof AssignedGpuDevice))) { + if ((!(obj instanceof AssignedGpuDevice))) { return -1; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDevice.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDevice.java index bce1d9fa480d9..cdfc6fab29fd2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDevice.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDevice.java @@ -43,7 +43,7 @@ public int getMinorNumber() { @Override public boolean equals(Object obj) { - if (obj == null || !(obj instanceof GpuDevice)) { + if (!(obj instanceof GpuDevice)) { return false; } GpuDevice other = (GpuDevice) obj; @@ -52,7 +52,7 @@ public boolean equals(Object obj) { @Override public int compareTo(Object obj) { - if (obj == null || (!(obj instanceof GpuDevice))) { + if (!(obj instanceof GpuDevice)) { return -1; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java index 4194b99300683..1cf899d77dd39 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeContext.java @@ -54,7 +54,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (obj == null || !(obj instanceof Attribute)){ + if (!(obj instanceof Attribute)){ return false; } From d9a1145d86d10bb20d6d86cd99de3a5784b96742 Mon Sep 17 00:00:00 2001 From: Jack Jiang <81204396+TJJack@users.noreply.github.com> Date: Tue, 23 Mar 2021 23:51:35 +0800 Subject: [PATCH 134/666] HDFS-15913. Remove useless NULL checks before instanceof (#2805) --- .../org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java | 2 +- .../org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java | 2 +- .../org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java | 2 +- .../java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java | 2 +- .../hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java | 2 +- .../apache/hadoop/hdfs/server/datanode/ErrorReportAction.java | 2 +- .../hadoop/hdfs/server/datanode/ReportBadBlockAction.java | 2 +- .../org/apache/hadoop/hdfs/server/datanode/StorageLocation.java | 2 +- .../main/java/org/apache/hadoop/hdfs/server/namenode/INode.java | 2 +- .../org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java | 2 +- .../apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java | 2 +- .../src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java | 2 +- .../src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java index 7bd3f969972c9..b0ea3ea9b3d41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java @@ -219,7 +219,7 @@ public int hashCode() { public boolean equals(Object obj) { if (obj == this) { return true; - } else if (obj == null || !(obj instanceof BlockStoragePolicy)) { + } else if (!(obj instanceof BlockStoragePolicy)) { return false; } final BlockStoragePolicy that = (BlockStoragePolicy)obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java index b7a7e98a13de1..05d2e031556d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java @@ -86,7 +86,7 @@ public int hashCode() { public boolean equals(Object obj) { if (obj == this) { return true; - } else if (obj == null || !(obj instanceof RollingUpgradeInfo)) { + } else if (!(obj instanceof RollingUpgradeInfo)) { return false; } final RollingUpgradeInfo that = (RollingUpgradeInfo)obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java index 1f969fbb0c12b..2e8e0eb528731 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java @@ -51,7 +51,7 @@ public int hashCode() { public boolean equals(Object obj) { if (obj == this) { return true; - } else if (obj == null || !(obj instanceof RollingUpgradeStatus)) { + } else if (!(obj instanceof RollingUpgradeStatus)) { return false; } final RollingUpgradeStatus that = (RollingUpgradeStatus) obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index 17f0d8f73b38c..a9538009ddb59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -622,7 +622,7 @@ public int hashCode() { public boolean equals(Object obj) { if (this == obj) { return true; - } else if (obj == null || !(obj instanceof StorageGroup)) { + } else if (!(obj instanceof StorageGroup)) { return false; } else { final StorageGroup that = (StorageGroup) obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index ee20ada2e8f87..8e46a2682409a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -321,7 +321,7 @@ public static void decrementBlocksScheduled(DatanodeStorageInfo... 
storages) { public boolean equals(Object obj) { if (this == obj) { return true; - } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) { + } else if (!(obj instanceof DatanodeStorageInfo)) { return false; } final DatanodeStorageInfo that = (DatanodeStorageInfo)obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java index 26498d4a0fc45..c56c5070a22ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java @@ -68,7 +68,7 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || !(obj instanceof ErrorReportAction)) { + if (!(obj instanceof ErrorReportAction)) { return false; } ErrorReportAction other = (ErrorReportAction) obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java index 2946358a5cbe2..1e72a6bc8904a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java @@ -88,7 +88,7 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || !(obj instanceof ReportBadBlockAction)) { + if (!(obj instanceof ReportBadBlockAction)) { return false; } ReportBadBlockAction other = (ReportBadBlockAction) obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java index 5fc8410840d39..d6701c06cfc34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java @@ -200,7 +200,7 @@ public String toString() { @Override public boolean equals(Object obj) { - if (obj == null || !(obj instanceof StorageLocation)) { + if (!(obj instanceof StorageLocation)) { return false; } int comp = compareTo((StorageLocation) obj); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 3752117d4b367..daff95c373911 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -850,7 +850,7 @@ public final boolean equals(Object that) { if (this == that) { return true; } - if (that == null || !(that instanceof INode)) { + if (!(that instanceof INode)) { return false; } return getId() == ((INode) that).getId(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java index 002972caf63e8..d1b777c71e5b2 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java @@ -275,7 +275,7 @@ public String toString() { public boolean equals(Object obj) { if (obj == this) { return true; - } else if (obj == null || !(obj instanceof QuotaCounts)) { + } else if (!(obj instanceof QuotaCounts)) { return false; } final QuotaCounts that = (QuotaCounts)obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index e9a8a29f47c82..3e78c82ea310f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -232,7 +232,7 @@ public int compareTo(byte[] bytes) { public boolean equals(Object that) { if (this == that) { return true; - } else if (that == null || !(that instanceof Snapshot)) { + } else if (!(that instanceof Snapshot)) { return false; } return this.id == ((Snapshot)that).id; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index 880bf6edb2516..fc3039af70ec5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -130,7 +130,7 @@ public long sum() { public boolean equals(Object obj) { if (obj == this) { return true; - } else if (obj == null || !(obj instanceof EnumCounters)) { + } else if (!(obj instanceof EnumCounters)) { return false; } final EnumCounters that = (EnumCounters)obj; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java index fee687edf54b1..9012ceab3e198 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java @@ -102,7 +102,7 @@ public final void subtract(final EnumDoubles that) { public boolean equals(Object obj) { if (obj == this) { return true; - } else if (obj == null || !(obj instanceof EnumDoubles)) { + } else if (!(obj instanceof EnumDoubles)) { return false; } final EnumDoubles that = (EnumDoubles)obj; From bc825e98573e1bbb9840779e029d9ca5bf360830 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 23 Mar 2021 22:28:19 +0530 Subject: [PATCH 135/666] HDFS-15910. 
Improve security with explicit_bzero (#2793) --- .../src/main/native/libhdfspp/lib/bindings/c/hdfs.cc | 2 +- .../src/main/native/libhdfspp/tests/hdfs_ext_test.cc | 2 +- .../src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc index 7e5815d8a364a..efa4c750108b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc @@ -1395,7 +1395,7 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations hdfsBlockLocations *locations = new struct hdfsBlockLocations(); (*locations_out) = locations; - bzero(locations, sizeof(*locations)); + explicit_bzero(locations, sizeof(*locations)); locations->fileLength = ppLocations->getFileLength(); locations->isLastBlockComplete = ppLocations->isLastBlockComplete(); locations->isUnderConstruction = ppLocations->isUnderConstruction(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc index 79771f0d7c57c..e69ddb26af4fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc @@ -475,7 +475,7 @@ TEST_F(HdfsExtTest, TestReadStats) { hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0); EXPECT_NE(nullptr, file); void * buf = malloc(size); - bzero(buf, size); + explicit_bzero(buf, size); EXPECT_EQ(size, hdfsWrite(fs, file, buf, size)); free(buf); EXPECT_EQ(0, hdfsCloseFile(fs, file)); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h index aecced1a8b6e5..98edbdc1d6501 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h @@ -92,7 +92,7 @@ class HdfsHandle { hdfsFile file = hdfsOpenFile(*this, path.c_str(), O_WRONLY, 0, 0, 0); EXPECT_NE(nullptr, file); void * buf = malloc(size); - bzero(buf, size); + explicit_bzero(buf, size); EXPECT_EQ(1024, hdfsWrite(*this, file, buf, size)); EXPECT_EQ(0, hdfsCloseFile(*this, file)); free(buf); From 1e9430dc0b7371885dc1916b13b71e6098dde924 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Tue, 23 Mar 2021 18:21:45 +0000 Subject: [PATCH 136/666] YARN-10697. Resources are displayed in bytes in UI for schedulers other than capacity. Contributed by Bilwa S T. 
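The cluster metrics overview rendered memory through Resource#toString(), which prints the raw memory figure, so schedulers other than capacity showed unreadable values in the web UI. The change keeps toString() output as-is (it now delegates to a formatted variant fed with the plain memory size) while letting the UI substitute a human-readable memory string. A rough illustration of the idea only; the helper name below is an assumption, not code from this patch, and it relies on the existing StringUtils.byteDesc() utility:

    import org.apache.hadoop.util.StringUtils;
    import org.apache.hadoop.yarn.api.records.Resource;

    // Illustrative sketch: render a Resource with a readable memory unit
    // instead of the raw MB figure.
    final class ResourceDisplay {
      static String toDisplayString(Resource r) {
        // getMemorySize() reports MB; byteDesc() expects a byte count.
        String mem = StringUtils.byteDesc(r.getMemorySize() * 1024L * 1024L);
        return "<memory:" + mem + ", vCores:" + r.getVirtualCores() + ">";
      }
    }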
--- .../apache/hadoop/yarn/api/records/Resource.java | 16 +++++++++++++++- .../webapp/MetricsOverviewTable.java | 12 ++++++------ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 83e1c5fe09b27..b91658f759c89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.impl.LightWeightResource; @@ -465,9 +466,13 @@ public int compareTo(Resource other) { @Override public String toString() { + return getFormattedString(String.valueOf(getMemorySize())); + } + + private String getFormattedString(String memory) { StringBuilder sb = new StringBuilder(); - sb.append(" Date: Wed, 24 Mar 2021 02:10:02 +0530 Subject: [PATCH 137/666] HDFS-15907. Reduce Memory Overhead of AclFeature by avoiding AtomicInteger. Contributed by Stephen O'Donnell. --- .../hadoop/hdfs/server/namenode/AclFeature.java | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java index 6b2baa4e31bb5..2dfe50742dac2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode; import java.util.Arrays; -import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; @@ -35,7 +34,7 @@ public class AclFeature implements INode.Feature, ReferenceCounter { public static final ImmutableList EMPTY_ENTRY_LIST = ImmutableList.of(); - private AtomicInteger value = new AtomicInteger(); + private int refCount = 0; private final int [] entries; @@ -84,17 +83,17 @@ public int hashCode() { } @Override - public int getRefCount() { - return value.get(); + public synchronized int getRefCount() { + return refCount; } @Override - public int incrementAndGetRefCount() { - return value.incrementAndGet(); + public synchronized int incrementAndGetRefCount() { + return ++refCount; } @Override - public int decrementAndGetRefCount() { - return value.updateAndGet(i -> i > 0 ? i - 1 : i); + public synchronized int decrementAndGetRefCount() { + return (refCount > 0) ? --refCount : 0; } } From b73c982c3ea62ccc0a0dc3afc3ae6e2dbfa2db58 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 24 Mar 2021 02:36:26 +0530 Subject: [PATCH 138/666] HADOOP-17531. DistCp: Reduce memory usage on copying huge directories. (#2732). Contributed by Ayush Saxena. 
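Building the copy listing for huge directory trees could exhaust client memory, since the default multi-threaded listing keeps many FileStatus objects in memory at once. The new -useiterator switch builds the listing with a single-threaded listStatusIterator instead, trading some listing speed for a much smaller client footprint; when it is set, the numListstatusThreads option is ignored. The same behaviour is reachable programmatically through the builder flag added here; a minimal sketch, with placeholder paths:

    import java.util.Collections;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.DistCpOptions;

    public class UseIteratorExample {
      public static void main(String[] args) {
        // Equivalent of: hadoop distcp -useiterator hdfs://nn1/src hdfs://nn2/dst
        DistCpOptions options = new DistCpOptions.Builder(
                Collections.singletonList(new Path("hdfs://nn1/src")),
                new Path("hdfs://nn2/dst"))
            .withUseIterator(true)
            .build();
        System.out.println(options);
      }
    }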
Signed-off-by: Steve Loughran --- .../functional/CommonCallableSupplier.java | 153 +++++++++ .../apache/hadoop/test/GenericTestUtils.java | 149 ++++++++- .../s3a/impl/ITestPartialRenamesDeletes.java | 46 +-- .../fs/s3a/scale/ITestS3ADeleteManyFiles.java | 2 +- .../apache/hadoop/tools/DistCpConstants.java | 1 + .../apache/hadoop/tools/DistCpContext.java | 4 + .../hadoop/tools/DistCpOptionSwitch.java | 7 +- .../apache/hadoop/tools/DistCpOptions.java | 19 ++ .../apache/hadoop/tools/OptionsParser.java | 4 +- .../hadoop/tools/SimpleCopyListing.java | 294 +++++++++++------- .../src/site/markdown/DistCp.md.vm | 1 + .../hadoop/tools/TestDistCpOptions.java | 2 +- .../apache/hadoop/tools/TestDistCpSystem.java | 5 - .../hadoop/tools/TestDistCpWithRawXAttrs.java | 23 ++ .../contract/AbstractContractDistCpTest.java | 39 +++ .../OptionalTestHDFSContractDistCp.java | 50 +++ .../src/test/resources/contract/hdfs.xml | 139 +++++++++ 17 files changed, 773 insertions(+), 165 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CommonCallableSupplier.java create mode 100644 hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/OptionalTestHDFSContractDistCp.java create mode 100644 hadoop-tools/hadoop-distcp/src/test/resources/contract/hdfs.xml diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CommonCallableSupplier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CommonCallableSupplier.java new file mode 100644 index 0000000000000..e2cdc0fd41472 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CommonCallableSupplier.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.Executor; +import java.util.function.Supplier; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.util.DurationInfo; + +import static org.apache.hadoop.fs.impl.FutureIOSupport.raiseInnerCause; + +/** + * A bridge from Callable to Supplier; catching exceptions + * raised by the callable and wrapping them as appropriate. + * @param return type. 
+ */ +public final class CommonCallableSupplier implements Supplier { + + private static final Logger LOG = + LoggerFactory.getLogger(CommonCallableSupplier.class); + + private final Callable call; + + /** + * Create. + * @param call call to invoke. + */ + public CommonCallableSupplier(final Callable call) { + this.call = call; + } + + @Override + public Object get() { + try { + return call.call(); + } catch (RuntimeException e) { + throw e; + } catch (IOException e) { + throw new UncheckedIOException(e); + } catch (Exception e) { + throw new UncheckedIOException(new IOException(e)); + } + } + + /** + * Submit a callable into a completable future. + * RTEs are rethrown. + * Non RTEs are caught and wrapped; IOExceptions to + * {@code RuntimeIOException} instances. + * @param executor executor. + * @param call call to invoke + * @param type + * @return the future to wait for + */ + @SuppressWarnings("unchecked") + public static CompletableFuture submit(final Executor executor, + final Callable call) { + return CompletableFuture + .supplyAsync(new CommonCallableSupplier(call), executor); + } + + /** + * Wait for a list of futures to complete. If the list is empty, + * return immediately. + * @param futures list of futures. + * @throws IOException if one of the called futures raised an IOE. + * @throws RuntimeException if one of the futures raised one. + */ + public static void waitForCompletion( + final List> futures) throws IOException { + if (futures.isEmpty()) { + return; + } + // await completion + waitForCompletion( + CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))); + } + + /** + * Wait for a single of future to complete, extracting IOEs afterwards. + * @param future future to wait for. + * @throws IOException if one of the called futures raised an IOE. + * @throws RuntimeException if one of the futures raised one. + */ + public static void waitForCompletion(final CompletableFuture future) + throws IOException { + try (DurationInfo ignore = new DurationInfo(LOG, false, + "Waiting for task completion")) { + future.join(); + } catch (CancellationException e) { + throw new IOException(e); + } catch (CompletionException e) { + raiseInnerCause(e); + } + } + + /** + * Wait for a single of future to complete, ignoring exceptions raised. + * @param future future to wait for. + */ + public static void waitForCompletionIgnoringExceptions( + @Nullable final CompletableFuture future) { + if (future != null) { + try (DurationInfo ignore = new DurationInfo(LOG, false, + "Waiting for task completion")) { + future.join(); + } catch (Exception e) { + LOG.debug("Ignoring exception raised in task completion: "); + } + } + } + + /** + * Block awaiting completion for any non-null future passed in; + * No-op if a null arg was supplied. + * @param future future + * @throws IOException if one of the called futures raised an IOE. + * @throws RuntimeException if one of the futures raised one. 
+ */ + public static void maybeAwaitCompletion( + @Nullable final CompletableFuture future) throws IOException { + if (future != null) { + waitForCompletion(future); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index e266f285685ac..33ebd8695b263 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -30,13 +30,17 @@ import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Locale; import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.Enumeration; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; @@ -46,8 +50,11 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; +import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Appender; @@ -61,15 +68,28 @@ import org.junit.Assume; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; +import static org.apache.hadoop.util.functional.CommonCallableSupplier.submit; +import static org.apache.hadoop.util.functional.CommonCallableSupplier.waitForCompletion; + /** * Test provides some very generic helpers which might be used across the tests */ public abstract class GenericTestUtils { + public static final int EXECUTOR_THREAD_COUNT = 64; + + private static final org.slf4j.Logger LOG = + LoggerFactory.getLogger(GenericTestUtils.class); + + public static final String PREFIX = "file-"; + private static final AtomicInteger sequence = new AtomicInteger(); /** @@ -896,5 +916,132 @@ public static int getTestsThreadCount() { } return threadCount; } + /** + * Write the text to a file asynchronously. Logs the operation duration. + * @param fs filesystem + * @param path path + * @return future to the patch created. + */ + private static CompletableFuture put(FileSystem fs, + Path path, String text) { + return submit(EXECUTOR, () -> { + try (DurationInfo ignore = + new DurationInfo(LOG, false, "Creating %s", path)) { + createFile(fs, path, true, text.getBytes(Charsets.UTF_8)); + return path; + } + }); + } + + /** + * Build a set of files in a directory tree. + * @param fs filesystem + * @param destDir destination + * @param depth file depth + * @param fileCount number of files to create. 
+ * @param dirCount number of dirs to create at each level + * @return the list of files created. + */ + public static List createFiles(final FileSystem fs, + final Path destDir, + final int depth, + final int fileCount, + final int dirCount) throws IOException { + return createDirsAndFiles(fs, destDir, depth, fileCount, dirCount, + new ArrayList(fileCount), + new ArrayList(dirCount)); + } + + /** + * Build a set of files in a directory tree. + * @param fs filesystem + * @param destDir destination + * @param depth file depth + * @param fileCount number of files to create. + * @param dirCount number of dirs to create at each level + * @param paths [out] list of file paths created + * @param dirs [out] list of directory paths created. + * @return the list of files created. + */ + public static List createDirsAndFiles(final FileSystem fs, + final Path destDir, + final int depth, + final int fileCount, + final int dirCount, + final List paths, + final List dirs) throws IOException { + buildPaths(paths, dirs, destDir, depth, fileCount, dirCount); + List> futures = new ArrayList<>(paths.size() + + dirs.size()); + + // create directories. With dir marker retention, that adds more entries + // to cause deletion issues + try (DurationInfo ignore = + new DurationInfo(LOG, "Creating %d directories", dirs.size())) { + for (Path path : dirs) { + futures.add(submit(EXECUTOR, () ->{ + fs.mkdirs(path); + return path; + })); + } + waitForCompletion(futures); + } + + try (DurationInfo ignore = + new DurationInfo(LOG, "Creating %d files", paths.size())) { + for (Path path : paths) { + futures.add(put(fs, path, path.getName())); + } + waitForCompletion(futures); + return paths; + } + } -} + /** + * Recursive method to build up lists of files and directories. + * @param filePaths list of file paths to add entries to. + * @param dirPaths list of directory paths to add entries to. + * @param destDir destination directory. + * @param depth depth of directories + * @param fileCount number of files. + * @param dirCount number of directories. + */ + public static void buildPaths(final List filePaths, + final List dirPaths, final Path destDir, final int depth, + final int fileCount, final int dirCount) { + if (depth <= 0) { + return; + } + // create the file paths + for (int i = 0; i < fileCount; i++) { + String name = filenameOfIndex(i); + Path p = new Path(destDir, name); + filePaths.add(p); + } + for (int i = 0; i < dirCount; i++) { + String name = String.format("dir-%03d", i); + Path p = new Path(destDir, name); + dirPaths.add(p); + buildPaths(filePaths, dirPaths, p, depth - 1, fileCount, dirCount); + } + } + + /** + * Given an index, return a string to use as the filename. + * @param i index + * @return name + */ + public static String filenameOfIndex(final int i) { + return String.format("%s%03d", PREFIX, i); + } + + /** + * For submitting work. 
+ */ + private static final BlockingThreadPoolExecutorService EXECUTOR = + BlockingThreadPoolExecutorService.newInstance( + EXECUTOR_THREAD_COUNT, + EXECUTOR_THREAD_COUNT * 2, + 30, TimeUnit.SECONDS, + "test-operations"); +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java index c920be13230dd..df45d0def5a4f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java @@ -79,6 +79,7 @@ import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.extractCause; import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; +import static org.apache.hadoop.test.GenericTestUtils.buildPaths; import static org.apache.hadoop.test.LambdaTestUtils.eval; /** @@ -162,8 +163,6 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { public static final int DEPTH = 2; public static final int DEPTH_SCALED = 2; - public static final String PREFIX = "file-"; - /** * A role FS; if non-null it is closed in teardown. */ @@ -910,49 +909,6 @@ public static List createDirsAndFiles(final FileSystem fs, } } - /** - * Recursive method to build up lists of files and directories. - * @param filePaths list of file paths to add entries to. - * @param dirPaths list of directory paths to add entries to. - * @param destDir destination directory. - * @param depth depth of directories - * @param fileCount number of files. - * @param dirCount number of directories. - */ - private static void buildPaths( - final List filePaths, - final List dirPaths, - final Path destDir, - final int depth, - final int fileCount, - final int dirCount) { - if (depth<=0) { - return; - } - // create the file paths - for (int i = 0; i < fileCount; i++) { - String name = filenameOfIndex(i); - Path p = new Path(destDir, name); - filePaths.add(p); - } - for (int i = 0; i < dirCount; i++) { - String name = String.format("dir-%03d", i); - Path p = new Path(destDir, name); - dirPaths.add(p); - buildPaths(filePaths, dirPaths, p, depth - 1, fileCount, dirCount); - } - - } - - /** - * Given an index, return a string to use as the filename. - * @param i index - * @return name - */ - public static String filenameOfIndex(final int i) { - return String.format("%s%03d", PREFIX, i); - } - /** * Verifies that s3:DeleteObjectVersion is not required for rename. *

diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java index efaec5f4fa5f0..a724b9737c948 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java @@ -39,7 +39,7 @@ import static org.apache.hadoop.fs.s3a.Constants.USER_AGENT_PREFIX; import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR; import static org.apache.hadoop.fs.s3a.impl.ITestPartialRenamesDeletes.createFiles; -import static org.apache.hadoop.fs.s3a.impl.ITestPartialRenamesDeletes.filenameOfIndex; +import static org.apache.hadoop.test.GenericTestUtils.filenameOfIndex; /** * Test some scalable operations related to file renaming and deletion. diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java index 25815687c2973..c75c0e85dd791 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java @@ -139,6 +139,7 @@ private DistCpConstants() { public static final String CONF_LABEL_BLOCKS_PER_CHUNK = "distcp.blocks.per.chunk"; + public static final String CONF_LABEL_USE_ITERATOR = "distcp.use.iterator"; /** * Constants for DistCp return code to shell / consumer of ToolRunner's run */ diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java index 1e63d802e876c..0d08796ce2d75 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java @@ -171,6 +171,10 @@ public int getBlocksPerChunk() { return options.getBlocksPerChunk(); } + public boolean shouldUseIterator() { + return options.shouldUseIterator(); + } + public final boolean splitLargeFile() { return options.getBlocksPerChunk() > 0; } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java index 3d319dae83510..4163f8274d967 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java @@ -239,7 +239,12 @@ public enum DistCpOptionSwitch { */ DIRECT_WRITE(DistCpConstants.CONF_LABEL_DIRECT_WRITE, new Option("direct", false, "Write files directly to the" - + " target location, avoiding temporary file rename.")); + + " target location, avoiding temporary file rename.")), + + USE_ITERATOR(DistCpConstants.CONF_LABEL_USE_ITERATOR, + new Option("useiterator", false, + "Use single threaded list status iterator to build " + + "the listing to save the memory utilisation at the client")); public static final String PRESERVE_STATUS_DEFAULT = "-prbugpct"; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index 9354c5ea9f583..6315528fb88a4 100644 --- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -158,6 +158,8 @@ public final class DistCpOptions { /** Whether data should be written directly to the target paths. */ private final boolean directWrite; + private final boolean useIterator; + /** * File attributes for preserve. * @@ -222,6 +224,8 @@ private DistCpOptions(Builder builder) { this.trackPath = builder.trackPath; this.directWrite = builder.directWrite; + + this.useIterator = builder.useIterator; } public Path getSourceFileListing() { @@ -353,6 +357,10 @@ public boolean shouldDirectWrite() { return directWrite; } + public boolean shouldUseIterator() { + return useIterator; + } + /** * Add options to configuration. These will be used in the Mapper/committer * @@ -403,6 +411,9 @@ public void appendToConf(Configuration conf) { } DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.DIRECT_WRITE, String.valueOf(directWrite)); + + DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.USE_ITERATOR, + String.valueOf(useIterator)); } /** @@ -440,6 +451,7 @@ public String toString() { ", copyBufferSize=" + copyBufferSize + ", verboseLog=" + verboseLog + ", directWrite=" + directWrite + + ", useiterator=" + useIterator + '}'; } @@ -491,6 +503,8 @@ public static class Builder { private boolean directWrite = false; + private boolean useIterator = false; + public Builder(List sourcePaths, Path targetPath) { Preconditions.checkArgument(sourcePaths != null && !sourcePaths.isEmpty(), "Source paths should not be null or empty!"); @@ -748,6 +762,11 @@ public Builder withDirectWrite(boolean newDirectWrite) { this.directWrite = newDirectWrite; return this; } + + public Builder withUseIterator(boolean useItr) { + this.useIterator = useItr; + return this; + } } } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java index 1fbea9a0ea287..a4c3b0f851f7b 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java @@ -115,7 +115,9 @@ public static DistCpOptions parse(String[] args) .withVerboseLog( command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch())) .withDirectWrite( - command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch())); + command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch())) + .withUseIterator( + command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch())); if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) { String[] snapshots = getVals(command, diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java index ddcbb14e8314f..900ce6296db08 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java @@ -25,6 +25,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.statistics.IOStatisticsLogging; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import 
org.apache.hadoop.io.SequenceFile; @@ -36,6 +38,8 @@ import org.apache.hadoop.tools.util.WorkReport; import org.apache.hadoop.tools.util.WorkRequest; import org.apache.hadoop.tools.util.WorkRequestProcessor; +import org.apache.hadoop.util.DurationInfo; +import org.apache.hadoop.util.functional.RemoteIterators; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.security.Credentials; @@ -49,6 +53,7 @@ import java.util.List; import java.util.Random; import java.util.LinkedList; +import java.util.Stack; import static org.apache.hadoop.tools.DistCpConstants .HDFS_RESERVED_RAW_DIRECTORY_NAME; @@ -94,11 +99,9 @@ protected SimpleCopyListing(Configuration configuration, Credentials credentials randomizeFileListing = getConf().getBoolean( DistCpConstants.CONF_LABEL_SIMPLE_LISTING_RANDOMIZE_FILES, DEFAULT_RANDOMIZE_FILE_LISTING); - if (LOG.isDebugEnabled()) { - LOG.debug("numListstatusThreads=" + numListstatusThreads - + ", fileStatusLimit=" + fileStatusLimit - + ", randomizeFileListing=" + randomizeFileListing); - } + LOG.debug( + "numListstatusThreads={}, fileStatusLimit={}, randomizeFileListing={}", + numListstatusThreads, fileStatusLimit, randomizeFileListing); copyFilter = CopyFilter.getCopyFilter(getConf()); copyFilter.initialize(); } @@ -286,10 +289,8 @@ protected void doBuildListingWithSnapshotDiff( FileStatus sourceStatus = sourceFS.getFileStatus(diff.getTarget()); if (sourceStatus.isDirectory()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Adding source dir for traverse: " + - sourceStatus.getPath()); - } + LOG.debug("Adding source dir for traverse: {}", + sourceStatus.getPath()); HashSet excludeList = distCpSync.getTraverseExcludeList(diff.getSource(), @@ -298,8 +299,9 @@ protected void doBuildListingWithSnapshotDiff( ArrayList sourceDirs = new ArrayList<>(); sourceDirs.add(sourceStatus); - traverseDirectory(fileListWriter, sourceFS, sourceDirs, - sourceRoot, context, excludeList, fileStatuses); + new TraverseDirectory(fileListWriter, sourceFS, sourceDirs, + sourceRoot, context, excludeList, fileStatuses) + .traverseDirectory(); } } } @@ -366,9 +368,8 @@ protected void doBuildListing(SequenceFile.Writer fileListWriter, if (explore) { ArrayList sourceDirs = new ArrayList(); for (FileStatus sourceStatus: sourceFiles) { - if (LOG.isDebugEnabled()) { - LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy."); - } + LOG.debug("Recording source-path: {} for copy.", + sourceStatus.getPath()); LinkedList sourceCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus, preserveAcls && sourceStatus.isDirectory(), @@ -384,14 +385,13 @@ protected void doBuildListing(SequenceFile.Writer fileListWriter, } } if (sourceStatus.isDirectory()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Adding source dir for traverse: " + sourceStatus.getPath()); - } + LOG.debug("Adding source dir for traverse: {}", + sourceStatus.getPath()); sourceDirs.add(sourceStatus); } } - traverseDirectory(fileListWriter, sourceFS, sourceDirs, - sourcePathRoot, context, null, statusList); + new TraverseDirectory(fileListWriter, sourceFS, sourceDirs, + sourcePathRoot, context, null, statusList).traverseDirectory(); } } if (randomizeFileListing) { @@ -429,16 +429,12 @@ private void writeToFileListing(List fileStatusInfoList, */ Collections.shuffle(fileStatusInfoList, rnd); for (FileStatusInfo fileStatusInfo : fileStatusInfoList) { - if (LOG.isDebugEnabled()) { - LOG.debug("Adding " + fileStatusInfo.fileStatus.getPath()); - } + LOG.debug("Adding {}", 
fileStatusInfo.fileStatus.getPath()); writeToFileListing(fileListWriter, fileStatusInfo.fileStatus, fileStatusInfo.sourceRootPath); } - if (LOG.isDebugEnabled()) { - LOG.debug("Number of paths written to fileListing=" - + fileStatusInfoList.size()); - } + LOG.debug("Number of paths written to fileListing={}", + fileStatusInfoList.size()); fileStatusInfoList.clear(); } @@ -590,8 +586,8 @@ public WorkReport processItem( result = new WorkReport(getFileStatus(parent.getPath()), retry, true); } catch (FileNotFoundException fnf) { - LOG.error("FileNotFoundException exception in listStatus: " + - fnf.getMessage()); + LOG.error("FileNotFoundException exception in listStatus: {}", + fnf.getMessage()); result = new WorkReport(new FileStatus[0], retry, true, fnf); } catch (Exception e) { @@ -605,8 +601,7 @@ public WorkReport processItem( } private void printStats() { - LOG.info("Paths (files+dirs) cnt = " + totalPaths + - "; dirCnt = " + totalDirs); + LOG.info("Paths (files+dirs) cnt = {}; dirCnt = ", totalPaths, totalDirs); } private void maybePrintStats() { @@ -615,79 +610,6 @@ private void maybePrintStats() { } } - private void traverseDirectory(SequenceFile.Writer fileListWriter, - FileSystem sourceFS, - ArrayList sourceDirs, - Path sourcePathRoot, - DistCpContext context, - HashSet excludeList, - List fileStatuses) - throws IOException { - final boolean preserveAcls = context.shouldPreserve(FileAttribute.ACL); - final boolean preserveXAttrs = context.shouldPreserve(FileAttribute.XATTR); - final boolean preserveRawXattrs = context.shouldPreserveRawXattrs(); - - assert numListstatusThreads > 0; - if (LOG.isDebugEnabled()) { - LOG.debug("Starting thread pool of " + numListstatusThreads + - " listStatus workers."); - } - ProducerConsumer workers = - new ProducerConsumer(numListstatusThreads); - for (int i = 0; i < numListstatusThreads; i++) { - workers.addWorker( - new FileStatusProcessor(sourcePathRoot.getFileSystem(getConf()), - excludeList)); - } - - for (FileStatus status : sourceDirs) { - workers.put(new WorkRequest(status, 0)); - } - - while (workers.hasWork()) { - try { - WorkReport workResult = workers.take(); - int retry = workResult.getRetry(); - for (FileStatus child: workResult.getItem()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Recording source-path: " + child.getPath() + " for copy."); - } - if (workResult.getSuccess()) { - LinkedList childCopyListingStatus = - DistCpUtils.toCopyListingFileStatus(sourceFS, child, - preserveAcls && child.isDirectory(), - preserveXAttrs && child.isDirectory(), - preserveRawXattrs && child.isDirectory(), - context.getBlocksPerChunk()); - - for (CopyListingFileStatus fs : childCopyListingStatus) { - if (randomizeFileListing) { - addToFileListing(fileStatuses, - new FileStatusInfo(fs, sourcePathRoot), fileListWriter); - } else { - writeToFileListing(fileListWriter, fs, sourcePathRoot); - } - } - } - if (retry < maxRetries) { - if (child.isDirectory()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Traversing into source dir: " + child.getPath()); - } - workers.put(new WorkRequest(child, retry)); - } - } else { - LOG.error("Giving up on " + child.getPath() + - " after " + retry + " retries."); - } - } - } catch (InterruptedException ie) { - LOG.error("Could not get item from childQueue. 
Retrying..."); - } - } - workers.shutdown(); - } - private void writeToFileListingRoot(SequenceFile.Writer fileListWriter, LinkedList fileStatus, Path sourcePathRoot, DistCpContext context) throws IOException { @@ -697,9 +619,7 @@ private void writeToFileListingRoot(SequenceFile.Writer fileListWriter, if (fs.getPath().equals(sourcePathRoot) && fs.isDirectory() && syncOrOverwrite) { // Skip the root-paths when syncOrOverwrite - if (LOG.isDebugEnabled()) { - LOG.debug("Skip " + fs.getPath()); - } + LOG.debug("Skip {}", fs.getPath()); return; } writeToFileListing(fileListWriter, fs, sourcePathRoot); @@ -709,10 +629,9 @@ private void writeToFileListingRoot(SequenceFile.Writer fileListWriter, private void writeToFileListing(SequenceFile.Writer fileListWriter, CopyListingFileStatus fileStatus, Path sourcePathRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("REL PATH: " + DistCpUtils.getRelativePath(sourcePathRoot, - fileStatus.getPath()) + ", FULL PATH: " + fileStatus.getPath()); - } + LOG.debug("REL PATH: {}, FULL PATH: {}", + DistCpUtils.getRelativePath(sourcePathRoot, fileStatus.getPath()), + fileStatus.getPath()); if (!shouldCopy(fileStatus.getPath())) { return; @@ -730,4 +649,159 @@ private void writeToFileListing(SequenceFile.Writer fileListWriter, totalPaths++; maybePrintStats(); } + + /** + * A utility class to traverse a directory. + */ + private final class TraverseDirectory { + + private SequenceFile.Writer fileListWriter; + private FileSystem sourceFS; + private ArrayList sourceDirs; + private Path sourcePathRoot; + private DistCpContext context; + private HashSet excludeList; + private List fileStatuses; + private final boolean preserveAcls; + private final boolean preserveXAttrs; + private final boolean preserveRawXattrs; + + private TraverseDirectory(SequenceFile.Writer fileListWriter, + FileSystem sourceFS, ArrayList sourceDirs, + Path sourcePathRoot, DistCpContext context, HashSet excludeList, + List fileStatuses) { + this.fileListWriter = fileListWriter; + this.sourceFS = sourceFS; + this.sourceDirs = sourceDirs; + this.sourcePathRoot = sourcePathRoot; + this.context = context; + this.excludeList = excludeList; + this.fileStatuses = fileStatuses; + this.preserveAcls = context.shouldPreserve(FileAttribute.ACL); + this.preserveXAttrs = context.shouldPreserve(FileAttribute.XATTR); + this.preserveRawXattrs = context.shouldPreserveRawXattrs(); + } + + public void traverseDirectory() throws IOException { + if (context.shouldUseIterator()) { + try (DurationInfo ignored = new DurationInfo(LOG, + "Building listing using iterator mode for %s", sourcePathRoot)) { + traverseDirectoryLegacy(); + } + } else { + try (DurationInfo ignored = new DurationInfo(LOG, + "Building listing using multi threaded approach for %s", + sourcePathRoot)) { + traverseDirectoryMultiThreaded(); + } + } + } + + public void traverseDirectoryMultiThreaded() throws IOException { + assert numListstatusThreads > 0; + + LOG.debug("Starting thread pool of {} listStatus workers.", + numListstatusThreads); + + ProducerConsumer workers = + new ProducerConsumer(numListstatusThreads); + try { + for (int i = 0; i < numListstatusThreads; i++) { + workers.addWorker( + new FileStatusProcessor(sourcePathRoot.getFileSystem(getConf()), + excludeList)); + } + + for (FileStatus status : sourceDirs) { + workers.put(new WorkRequest(status, 0)); + } + + while (workers.hasWork()) { + try { + WorkReport workResult = workers.take(); + int retry = workResult.getRetry(); + for (FileStatus child : workResult.getItem()) { + 
LOG.debug("Recording source-path: {} for copy.", child.getPath()); + boolean isChildDirectory = child.isDirectory(); + if (workResult.getSuccess()) { + LinkedList childCopyListingStatus = + DistCpUtils.toCopyListingFileStatus(sourceFS, child, + preserveAcls && isChildDirectory, + preserveXAttrs && isChildDirectory, + preserveRawXattrs && isChildDirectory, + context.getBlocksPerChunk()); + + for (CopyListingFileStatus fs : childCopyListingStatus) { + if (randomizeFileListing) { + addToFileListing(fileStatuses, + new FileStatusInfo(fs, sourcePathRoot), fileListWriter); + } else { + writeToFileListing(fileListWriter, fs, sourcePathRoot); + } + } + } + if (retry < maxRetries) { + if (isChildDirectory) { + LOG.debug("Traversing into source dir: {}", child.getPath()); + workers.put(new WorkRequest(child, retry)); + } + } else { + LOG.error("Giving up on {} after {} retries.", child.getPath(), + retry); + } + } + } catch (InterruptedException ie) { + LOG.error("Could not get item from childQueue. Retrying..."); + } + } + } finally { + workers.shutdown(); + } + } + + private void traverseDirectoryLegacy() throws IOException { + Stack pathStack = new Stack(); + for (FileStatus fs : sourceDirs) { + if (excludeList == null || !excludeList + .contains(fs.getPath().toUri().getPath())) { + pathStack.add(fs); + } + } + while (!pathStack.isEmpty()) { + prepareListing(pathStack.pop().getPath()); + } + } + + private void prepareListing(Path path) throws IOException { + LOG.debug("Recording source-path: {} for copy.", path); + RemoteIterator listStatus = RemoteIterators + .filteringRemoteIterator(sourceFS.listStatusIterator(path), + i -> excludeList == null || !excludeList + .contains(i.getPath().toUri().getPath())); + while (listStatus.hasNext()) { + FileStatus child = listStatus.next(); + LinkedList childCopyListingStatus = DistCpUtils + .toCopyListingFileStatus(sourceFS, child, + preserveAcls && child.isDirectory(), + preserveXAttrs && child.isDirectory(), + preserveRawXattrs && child.isDirectory(), + context.getBlocksPerChunk()); + for (CopyListingFileStatus fs : childCopyListingStatus) { + if (randomizeFileListing) { + addToFileListing(fileStatuses, + new FileStatusInfo(fs, sourcePathRoot), fileListWriter); + } else { + writeToFileListing(fileListWriter, fs, sourcePathRoot); + } + } + if (child.isDirectory()) { + LOG.debug("Traversing into source dir: {}", child.getPath()); + prepareListing(child.getPath()); + } + } + IOStatisticsLogging + .logIOStatisticsAtDebug(LOG, "RemoteIterator Statistics: {}", + listStatus); + } + } } diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm index e82d8bc96687b..136b6c8ca1339 100644 --- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm +++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm @@ -362,6 +362,7 @@ Command Line Options | `-copybuffersize ` | Size of the copy buffer to use. By default, `` is set to 8192B | | | `-xtrack ` | Save information about missing source files to the specified path. | This option is only valid with `-update` option. This is an experimental property and it cannot be used with `-atomic` option. | | `-direct` | Write directly to destination paths | Useful for avoiding potentially very expensive temporary file rename operations when the destination is an object store | +| `-useiterator` | Uses single threaded listStatusIterator to build listing | Useful for saving memory at the client side. 
Using this option will ignore the numListstatusThreads option | Architecture of DistCp ---------------------- diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java index 7382795dd90d7..13497029a0767 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java @@ -289,7 +289,7 @@ public void testToString() { "atomicWorkPath=null, logPath=null, sourceFileListing=abc, " + "sourcePaths=null, targetPath=xyz, filtersFile='null', " + "blocksPerChunk=0, copyBufferSize=8192, verboseLog=false, " + - "directWrite=false}"; + "directWrite=false, useiterator=false}"; String optionString = option.toString(); Assert.assertEquals(val, optionString); Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(), diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSystem.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSystem.java index 14cce42e0f86b..47b850f4ba3e2 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSystem.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSystem.java @@ -48,9 +48,7 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.Timeout; /** * A JUnit test for copying files recursively. @@ -60,9 +58,6 @@ public class TestDistCpSystem { private static final Logger LOG = LoggerFactory.getLogger(TestDistCpSystem.class); - @Rule - public Timeout globalTimeout = new Timeout(30000); - private static final String SRCDAT = "srcdat"; private static final String DSTDAT = "dstdat"; private static final long BLOCK_SIZE = 1024; diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java index 978ccdd3ea286..e0e103bfe8019 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java @@ -29,9 +29,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.ECAdmin; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.tools.util.DistCpTestUtils; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.functional.RemoteIterators; +import org.assertj.core.api.Assertions; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -69,6 +72,7 @@ public class TestDistCpWithRawXAttrs { public static void init() throws Exception { conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); + conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true) .build(); cluster.waitActive(); @@ -217,4 +221,23 @@ public void testPreserveEC() throws Exception { assertTrue("/dest/dir1/subdir1 is not erasure coded!", destSubDir1Status.isErasureCoded()); } + + @Test + public void testUseIterator() throws Exception { + + Path source = new Path("/src"); + Path dest = new Path("/dest"); + fs.delete(source, true); + 
fs.delete(dest, true); + // Create a source dir + fs.mkdirs(source); + + GenericTestUtils.createFiles(fs, source, 3, 10, 10); + + DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + dest.toString(), "-useiterator", conf); + + Assertions.assertThat(RemoteIterators.toList(fs.listFiles(dest, true))) + .describedAs("files").hasSize(1110); + } } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 1a40d78b2651a..202ead69a17bb 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -44,7 +44,9 @@ import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.mapred.CopyMapper; import org.apache.hadoop.tools.util.DistCpTestUtils; +import org.apache.hadoop.util.functional.RemoteIterators; +import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -59,6 +61,7 @@ * under test. The tests in the suite cover both copying from local to remote * (e.g. a backup use case) and copying from remote to local (e.g. a restore use * case). + * The HDFS contract test needs to be run explicitly. */ public abstract class AbstractContractDistCpTest extends AbstractFSContractTestBase { @@ -613,6 +616,42 @@ public void testNonDirectWrite() throws Exception { directWrite(localFS, localDir, remoteFS, remoteDir, false); } + @Test + public void testDistCpWithIterator() throws Exception { + describe("Build listing in distCp using the iterator option."); + Path source = new Path(remoteDir, "src"); + Path dest = new Path(localDir, "dest"); + dest = localFS.makeQualified(dest); + mkdirs(remoteFS, source); + verifyPathExists(remoteFS, "", source); + + GenericTestUtils + .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth()); + + DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + dest.toString(), "-useiterator", conf); + + Assertions + .assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) + .describedAs("files").hasSize(getTotalFiles()); + } + + public int getDepth() { + return 3; + } + + public int getWidth() { + return 10; + } + + private int getTotalFiles() { + int totalFiles = 0; + for (int i = 1; i <= getDepth(); i++) { + totalFiles += Math.pow(getWidth(), i); + } + return totalFiles; + } + /** * Executes a test with support for using direct write option. * diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/OptionalTestHDFSContractDistCp.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/OptionalTestHDFSContractDistCp.java new file mode 100644 index 0000000000000..d8c74240794be --- /dev/null +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/OptionalTestHDFSContractDistCp.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.tools.contract; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.hdfs.HDFSContract; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; + +/** + * Verifies that the HDFS passes all the tests in + * {@link AbstractContractDistCpTest}. + * As such, it acts as an in-module validation of this contract test itself. + */ +public class OptionalTestHDFSContractDistCp extends AbstractContractDistCpTest { + + @BeforeClass + public static void createCluster() throws IOException { + HDFSContract.createCluster(); + } + + @AfterClass + public static void teardownCluster() throws IOException { + HDFSContract.destroyCluster(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new HDFSContract(conf); + } +} diff --git a/hadoop-tools/hadoop-distcp/src/test/resources/contract/hdfs.xml b/hadoop-tools/hadoop-distcp/src/test/resources/contract/hdfs.xml new file mode 100644 index 0000000000000..3c9396f79adec --- /dev/null +++ b/hadoop-tools/hadoop-distcp/src/test/resources/contract/hdfs.xml @@ -0,0 +1,139 @@ + + + + + + + fs.contract.test.root-tests-enabled + true + + + + fs.file.contract.test.random-seek-count + 500 + + + + fs.contract.is-case-sensitive + true + + + + fs.contract.supports-append + true + + + + fs.contract.supports-atomic-directory-delete + true + + + + fs.contract.supports-atomic-rename + true + + + + fs.contract.supports-block-locality + true + + + + fs.contract.supports-concat + true + + + + fs.contract.supports-seek + true + + + + fs.contract.rejects-seek-past-eof + true + + + + fs.contract.supports-strict-exceptions + true + + + + fs.contract.supports-unix-permissions + true + + + + fs.contract.rename-returns-false-if-dest-exists + true + + + + fs.contract.rename-returns-false-if-source-missing + true + + + + fs.contract.supports-settimes + true + + + + fs.contract.supports-getfilestatus + true + + + + fs.contract.supports-file-reference + true + + + + fs.contract.supports-content-check + true + + + + fs.contract.supports-unbuffer + true + + + + fs.contract.supports-hflush + true + + + + fs.contract.supports-hsync + true + + + + fs.contract.metadata_updated_on_hsync + false + + + + + dfs.namenode.fs-limits.min-block-size + 0 + + From 823e9faa9b2385ff862a870a34bdcec80e5664b3 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 24 Mar 2021 11:17:45 +0530 Subject: [PATCH 139/666] HDFS-15911 : Provide blocks moved count in Balancer iteration result (#2794) Contributed by Viraj Jasani. 
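Each balancer iteration already reported bytes left to move, bytes being moved and bytes already moved, but not how many blocks were actually moved, which made outcomes such as NO_MOVE_PROGRESS hard to interpret. The iteration Result now carries a blocksMoved counter taken from the dispatcher, prints it in the per-iteration summary line, and exposes it via getBlocksMoved(); Result#toString() is built with ToStringBuilder so assertion failures show all counters. A small sketch of how a test might summarise a result (helper name is illustrative, not part of this patch):

    // Balancer.Result is package-private, so this helper is assumed to live
    // in the org.apache.hadoop.hdfs.server.balancer test package.
    static String summary(Balancer.Result r) {
      return String.format("exit=%s blocksMoved=%d bytesMoved=%d",
          r.getExitStatus(), r.getBlocksMoved(), r.getBytesAlreadyMoved());
    }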
Signed-off-by: Mingliang Liu Signed-off-by: Ayush Saxena --- .../hadoop/hdfs/server/balancer/Balancer.java | 28 ++++++++++++++++--- .../hdfs/server/balancer/TestBalancer.java | 8 ++++-- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 0024ba56913d7..33650eac2a9af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -38,6 +38,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.DFSUtilClient; import org.slf4j.Logger; @@ -638,13 +639,15 @@ static class Result { private final long bytesLeftToMove; private final long bytesBeingMoved; private final long bytesAlreadyMoved; + private final long blocksMoved; Result(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved, - long bytesAlreadyMoved) { + long bytesAlreadyMoved, long blocksMoved) { this.exitStatus = exitStatus; this.bytesLeftToMove = bytesLeftToMove; this.bytesBeingMoved = bytesBeingMoved; this.bytesAlreadyMoved = bytesAlreadyMoved; + this.blocksMoved = blocksMoved; } public ExitStatus getExitStatus() { @@ -663,23 +666,40 @@ public long getBytesAlreadyMoved() { return bytesAlreadyMoved; } + public long getBlocksMoved() { + return blocksMoved; + } + void print(int iteration, NameNodeConnector nnc, PrintStream out) { - out.printf("%-24s %10d %19s %18s %17s %s%n", + out.printf("%-24s %10d %19s %18s %17s %17s %s%n", DateFormat.getDateTimeInstance().format(new Date()), iteration, StringUtils.byteDesc(bytesAlreadyMoved), StringUtils.byteDesc(bytesLeftToMove), StringUtils.byteDesc(bytesBeingMoved), + blocksMoved, nnc.getNameNodeUri()); } + + @Override + public String toString() { + return new ToStringBuilder(this) + .append("exitStatus", exitStatus) + .append("bytesLeftToMove", bytesLeftToMove) + .append("bytesBeingMoved", bytesBeingMoved) + .append("bytesAlreadyMoved", bytesAlreadyMoved) + .append("blocksMoved", blocksMoved) + .toString(); + } } Result newResult(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved) { return new Result(exitStatus, bytesLeftToMove, bytesBeingMoved, - dispatcher.getBytesMoved()); + dispatcher.getBytesMoved(), dispatcher.getBblocksMoved()); } Result newResult(ExitStatus exitStatus) { - return new Result(exitStatus, -1, -1, dispatcher.getBytesMoved()); + return new Result(exitStatus, -1, -1, dispatcher.getBytesMoved(), + dispatcher.getBblocksMoved()); } /** Run an iteration for all datanodes. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 343faf671f382..f59743fdaa73e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1658,6 +1658,7 @@ public void testMaxIterationTime() throws Exception { // a block is moved unexpectedly, IN_PROGRESS will be reported. 
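As a side note to the Balancer change above, the sketch below (not part of HDFS-15911) shows one way a caller could consume the widened Result; the reporter class, its package placement and the log message are assumptions made for illustration, while the getter names come straight from the diff.

    package org.apache.hadoop.hdfs.server.balancer;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /** Illustrative only: logs the counters exposed by Balancer.Result. */
    class BalancerIterationReporter {
      private static final Logger LOG =
          LoggerFactory.getLogger(BalancerIterationReporter.class);

      void report(int iteration, Balancer.Result result) {
        // getBlocksMoved() is the accessor added in this patch; the other
        // getters already existed on Result.
        LOG.info("Balancer iteration {}: exit={}, bytesMoved={}, blocksMoved={}",
            iteration, result.getExitStatus(), result.getBytesAlreadyMoved(),
            result.getBlocksMoved());
      }
    }
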
assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.", ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus()); + assertEquals(0, r.getBlocksMoved()); } } finally { for (NameNodeConnector nnc : connectors) { @@ -2309,8 +2310,11 @@ public void testBalancerWithSortTopNodes() throws Exception { // Hence, overall total blocks moved by HDFS balancer would be either of these 2 options: // a) 2 blocks of total size (100B + 100B) // b) 3 blocks of total size (50B + 100B + 100B) - assertTrue(balancerResult.getBytesAlreadyMoved() == 200 - || balancerResult.getBytesAlreadyMoved() == 250); + assertTrue("BalancerResult is not as expected. " + balancerResult, + (balancerResult.getBytesAlreadyMoved() == 200 + && balancerResult.getBlocksMoved() == 2) + || (balancerResult.getBytesAlreadyMoved() == 250 + && balancerResult.getBlocksMoved() == 3)); // 100% and 95% used nodes will be balanced, so top used will be 900 assertEquals(900, maxUsage); } From 124c196ff5a91145ee7e1ff981f9d1f79f67d91b Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Wed, 24 Mar 2021 08:15:06 +0100 Subject: [PATCH 140/666] YARN-10674. fs2cs should generate auto-created queue deletion properties. Contributed by Qi Zhu. --- .../FSConfigToCSConfigArgumentHandler.java | 29 ++++ .../FSConfigToCSConfigConverter.java | 15 ++- .../FSConfigToCSConfigConverterParams.java | 40 ++++++ .../fair/converter/FSYarnSiteConverter.java | 48 ++++++- .../TestFSConfigToCSConfigConverter.java | 15 +++ .../converter/TestFSYarnSiteConverter.java | 127 ++++++++++++++++-- 6 files changed, 261 insertions(+), 13 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java index 084b67d394e88..41e7ba5203778 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java @@ -118,6 +118,14 @@ public enum CliOption { "pc", "percentage", "Converts FS queue weights to percentages", false), + DISABLE_PREEMPTION("disable preemption", "dp", "disable-preemption", + "Disable the preemption with nopolicy or observeonly mode. " + + "Preemption is enabled by default. " + + "nopolicy removes ProportionalCapacityPreemptionPolicy from " + + "the list of monitor policies, " + + "observeonly sets " + + "yarn.resourcemanager.monitor.capacity.preemption.observe_only " + + "to true.", true), HELP("help", "h", "help", "Displays the list of options", false); private final String name; @@ -251,6 +259,12 @@ private FSConfigToCSConfigConverterParams validateInputFiles( cliParser.getOptionValue(CliOption.CONVERSION_RULES.shortSwitch); String outputDir = cliParser.getOptionValue(CliOption.OUTPUT_DIR.shortSwitch); + FSConfigToCSConfigConverterParams. + PreemptionMode preemptionMode = + FSConfigToCSConfigConverterParams. + PreemptionMode.fromString(cliParser. 
+ getOptionValue(CliOption.DISABLE_PREEMPTION.shortSwitch)); + boolean convertPlacementRules = !cliParser.hasOption( CliOption.SKIP_PLACEMENT_RULES_CONVERSION.shortSwitch); @@ -260,6 +274,10 @@ private FSConfigToCSConfigConverterParams validateInputFiles( checkFile(CliOption.CONVERSION_RULES, conversionRulesFile); checkDirectory(CliOption.OUTPUT_DIR, outputDir); checkOutputDirDoesNotContainXmls(yarnSiteXmlFile, outputDir); + if (cliParser.hasOption(CliOption. + DISABLE_PREEMPTION.shortSwitch)) { + checkDisablePreemption(preemptionMode); + } // check mapping-rules.json if we intend to generate it if (!cliParser.hasOption(CliOption.CONSOLE_MODE.shortSwitch) && @@ -281,6 +299,7 @@ private FSConfigToCSConfigConverterParams validateInputFiles( cliParser.hasOption(CliOption.RULES_TO_FILE.shortSwitch)) .withUsePercentages( cliParser.hasOption(CliOption.CONVERT_PERCENTAGES.shortSwitch)) + .withDisablePreemption(preemptionMode) .build(); } @@ -383,6 +402,16 @@ private static void checkFileInternal(CliOption cliOption, String filePath, } } + private static void checkDisablePreemption(FSConfigToCSConfigConverterParams. + PreemptionMode preemptionMode) { + if (preemptionMode == FSConfigToCSConfigConverterParams. + PreemptionMode.ENABLED) { + throw new PreconditionException( + "Specified disable-preemption mode is illegal, " + + " use nopolicy or observeonly."); + } + } + private FSConfigToCSConfigConverter getConverter() { return new FSConfigToCSConfigConverter(ruleHandler, conversionOptions); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java index a7602343db2b0..d5d4d908152ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java @@ -103,6 +103,8 @@ public class FSConfigToCSConfigConverter { private String outputDirectory; private boolean rulesToFile; private boolean usePercentages; + private FSConfigToCSConfigConverterParams. 
+ PreemptionMode preemptionMode; public FSConfigToCSConfigConverter(FSConfigToCSConfigRuleHandler ruleHandler, ConversionOptions conversionOptions) { @@ -121,6 +123,7 @@ public void convert(FSConfigToCSConfigConverterParams params) this.outputDirectory = params.getOutputDirectory(); this.rulesToFile = params.isPlacementRulesToFile(); this.usePercentages = params.isUsePercentages(); + this.preemptionMode = params.getPreemptionMode(); prepareOutputFiles(params.isConsole()); loadConversionRules(params.getConversionRulesConfig()); Configuration inputYarnSiteConfig = getInputYarnSiteConfig(params); @@ -277,7 +280,8 @@ private void convertYarnSiteXml(Configuration inputYarnSiteConfig) { new FSYarnSiteConverter(); siteConverter.convertSiteProperties(inputYarnSiteConfig, convertedYarnSiteConfig, drfUsed, - conversionOptions.isEnableAsyncScheduler()); + conversionOptions.isEnableAsyncScheduler(), + usePercentages, preemptionMode); preemptionEnabled = siteConverter.isPreemptionEnabled(); sizeBasedWeight = siteConverter.isSizeBasedWeight(); @@ -291,6 +295,7 @@ private void convertCapacitySchedulerXml(FairScheduler fs) { emitDefaultUserMaxParallelApplications(); emitUserMaxParallelApplications(); emitDefaultMaxAMShare(); + emitDisablePreemptionForObserveOnlyMode(); FSQueueConverter queueConverter = FSQueueConverterBuilder.create() .withRuleHandler(ruleHandler) @@ -407,6 +412,14 @@ private void emitDefaultMaxAMShare() { queueMaxAMShareDefault); } } + private void emitDisablePreemptionForObserveOnlyMode() { + if (preemptionMode == FSConfigToCSConfigConverterParams + .PreemptionMode.OBSERVE_ONLY) { + capacitySchedulerConfig. + setBoolean(CapacitySchedulerConfiguration. + PREEMPTION_OBSERVE_ONLY, true); + } + } private void emitACLs(FairScheduler fs) { fs.getAllocationConfiguration().getQueueAcls() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java index 1f515302788df..bc6adfacf4900 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverterParams.java @@ -30,6 +30,35 @@ public final class FSConfigToCSConfigConverterParams { private boolean convertPlacementRules; private boolean placementRulesToFile; private boolean usePercentages; + private PreemptionMode preemptionMode; + + public enum PreemptionMode { + ENABLED("enabled"), + NO_POLICY("nopolicy"), + OBSERVE_ONLY("observeonly"); + + private String cliOption; + + PreemptionMode(String cliOption) { + this.cliOption = cliOption; + } + + public String getCliOption() { + return cliOption; + } + + public static PreemptionMode fromString(String cliOption) { + if (cliOption != null && cliOption.trim(). + equals(PreemptionMode.OBSERVE_ONLY.getCliOption())) { + return PreemptionMode.OBSERVE_ONLY; + } else if (cliOption != null && cliOption.trim(). 
+ equals(PreemptionMode.NO_POLICY.getCliOption())) { + return PreemptionMode.NO_POLICY; + } else { + return PreemptionMode.ENABLED; + } + } + } private FSConfigToCSConfigConverterParams() { //must use builder @@ -71,6 +100,10 @@ public boolean isUsePercentages() { return usePercentages; } + public PreemptionMode getPreemptionMode() { + return preemptionMode; + } + @Override public String toString() { return "FSConfigToCSConfigConverterParams{" + @@ -99,6 +132,7 @@ public static final class Builder { private boolean convertPlacementRules; private boolean placementRulesToFile; private boolean usePercentages; + private PreemptionMode preemptionMode; private Builder() { } @@ -152,6 +186,11 @@ public Builder withUsePercentages(boolean usePercentages) { return this; } + public Builder withDisablePreemption(PreemptionMode preemptionMode) { + this.preemptionMode = preemptionMode; + return this; + } + public FSConfigToCSConfigConverterParams build() { FSConfigToCSConfigConverterParams params = new FSConfigToCSConfigConverterParams(); @@ -164,6 +203,7 @@ public FSConfigToCSConfigConverterParams build() { params.convertPlacementRules = this.convertPlacementRules; params.placementRulesToFile = this.placementRulesToFile; params.usePercentages = this.usePercentages; + params.preemptionMode = this.preemptionMode; return params; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java index 4222e3af6c2ea..401c056cc999e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java @@ -20,6 +20,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedQueueDeletionPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; @@ -36,7 +38,9 @@ public class FSYarnSiteConverter { @SuppressWarnings({"deprecation", "checkstyle:linelength"}) public void convertSiteProperties(Configuration conf, - Configuration yarnSiteConfig, boolean drfUsed, boolean enableAsyncScheduler) { + Configuration yarnSiteConfig, boolean drfUsed, + boolean enableAsyncScheduler, boolean userPercentage, + FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) { yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName()); @@ -52,12 +56,20 @@ public void convertSiteProperties(Configuration conf, "schedule-asynchronously.scheduling-interval-ms", interval); } + // This should be always true to trigger cs auto + // refresh queue. 
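To make the conversion easier to follow, here is a rough weight-mode sketch of the end state that convertSiteProperties() and the steps that follow produce; it is illustrative only (the input Configuration and the assert style are assumptions), while the method signature, property keys, policy class and the 10 second expiry are taken from this patch.

    // Illustrative sketch, not part of the patch.
    Configuration fsYarnSite = new Configuration(false);  // FS-based yarn-site
    Configuration converted = new Configuration(false);

    new FSYarnSiteConverter().convertSiteProperties(fsYarnSite, converted,
        /* drfUsed */ false, /* enableAsyncScheduler */ false,
        /* userPercentage */ false, /* preemptionMode */ null);

    // Monitors are always enabled so CapacityScheduler can auto-refresh queues.
    assert converted.getBoolean(
        YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, false);
    // In weight mode the auto-created queue deletion policy is appended to the
    // monitor policy list and the deletion expiry is aligned with FS (10s).
    assert converted.get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES)
        .contains(AutoCreatedQueueDeletionPolicy.class.getCanonicalName());
    assert converted.getInt(CapacitySchedulerConfiguration
        .AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, -1) == 10;
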
+ yarnSiteConfig.setBoolean( + YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true); + if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION, FairSchedulerConfiguration.DEFAULT_PREEMPTION)) { - yarnSiteConfig.setBoolean( - YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true); preemptionEnabled = true; + String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy. + class.getCanonicalName(), yarnSiteConfig); + yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + policies); + int waitTimeBeforeKill = conf.getInt( FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL); @@ -71,6 +83,23 @@ public void convertSiteProperties(Configuration conf, yarnSiteConfig.setLong( CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, waitBeforeNextStarvationCheck); + } else { + if (preemptionMode == + FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) { + yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, ""); + } + } + + // For auto created queue's auto deletion. + if (!userPercentage) { + String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy. + class.getCanonicalName(), yarnSiteConfig); + yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + policies); + + // Set the expired for deletion interval to 10s, consistent with fs. + yarnSiteConfig.setInt(CapacitySchedulerConfiguration. + AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10); } if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, @@ -132,4 +161,17 @@ public boolean isPreemptionEnabled() { public boolean isSizeBasedWeight() { return sizeBasedWeight; } + + private String addMonitorPolicy(String policyName, + Configuration yarnSiteConfig) { + String policies = + yarnSiteConfig.get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES); + if (policies == null || policies.isEmpty()) { + policies = policyName; + } else { + policies += "," + policyName; + } + return policies; + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java index 5450d409bdabb..c1c774bed0942 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java @@ -687,6 +687,21 @@ public void testConversionWhenAsyncSchedulingIsDisabled() throws Exception { schedulingEnabledValue); } + @Test + public void testSiteDisabledPreemptionWithObserveOnlyConversion() + throws Exception{ + FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder() + .withDisablePreemption(FSConfigToCSConfigConverterParams. + PreemptionMode.OBSERVE_ONLY) + .build(); + + converter.convert(params); + assertTrue("The observe only should be true", + converter.getCapacitySchedulerConfig(). + getBoolean(CapacitySchedulerConfiguration. 
+ PREEMPTION_OBSERVE_ONLY, false)); + } + private boolean testConversionWithAsyncSchedulingOption(boolean enabled) throws Exception { FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder() .withClusterResource(CLUSTER_RESOURCE_STRING) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java index 9cebf16f8be16..55ac242332309 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java @@ -18,6 +18,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedQueueDeletionPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; @@ -28,6 +30,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotEquals; /** * Unit tests for FSYarnSiteConverter. @@ -37,6 +40,8 @@ public class TestFSYarnSiteConverter { private Configuration yarnConfig; private FSYarnSiteConverter converter; private Configuration yarnConvertedConfig; + private static final String DELETION_POLICY_CLASS = + AutoCreatedQueueDeletionPolicy.class.getCanonicalName(); @Before public void setup() { @@ -54,7 +59,7 @@ public void testSiteContinuousSchedulingConversion() { FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, 666); converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertTrue("Cont. scheduling", yarnConvertedConfig.getBoolean( CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false)); @@ -73,7 +78,7 @@ public void testSitePreemptionConversion() { 321); converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertTrue("Preemption enabled", yarnConvertedConfig.getBoolean( @@ -87,6 +92,41 @@ public void testSitePreemptionConversion() { yarnConvertedConfig.getInt( CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, -1)); + + assertFalse("Observe_only should be false", + yarnConvertedConfig.getBoolean(CapacitySchedulerConfiguration. + PREEMPTION_OBSERVE_ONLY, false)); + + assertTrue("Should contain ProportionalCapacityPreemptionPolicy.", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES). + contains(ProportionalCapacityPreemptionPolicy. 
+ class.getCanonicalName())); + } + + @Test + public void testSiteDisabledPreemptionWithNoPolicyConversion() { + // Default mode is nopolicy + yarnConfig.setBoolean(FairSchedulerConfiguration.PREEMPTION, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, false, null); + + assertFalse("Should not contain ProportionalCapacityPreemptionPolicy.", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES). + contains(ProportionalCapacityPreemptionPolicy. + class.getCanonicalName())); + + yarnConfig.setBoolean(FairSchedulerConfiguration.PREEMPTION, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, false, + FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY); + + assertFalse("Should not contain ProportionalCapacityPreemptionPolicy.", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES). + contains(ProportionalCapacityPreemptionPolicy. + class.getCanonicalName())); } @Test @@ -94,7 +134,7 @@ public void testSiteAssignMultipleConversion() { yarnConfig.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true); converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertTrue("Assign multiple", yarnConvertedConfig.getBoolean( @@ -107,7 +147,7 @@ public void testSiteMaxAssignConversion() { yarnConfig.setInt(FairSchedulerConfiguration.MAX_ASSIGN, 111); converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertEquals("Max assign", 111, yarnConvertedConfig.getInt( @@ -122,7 +162,7 @@ public void testSiteLocalityThresholdConversion() { "321.321"); converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertEquals("Locality threshold node", "123.123", yarnConvertedConfig.get( @@ -135,7 +175,7 @@ public void testSiteLocalityThresholdConversion() { @Test public void testSiteDrfEnabledConversion() { converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true, - false); + false, false, null); assertEquals("Resource calculator type", DominantResourceCalculator.class, yarnConvertedConfig.getClass( @@ -145,7 +185,7 @@ public void testSiteDrfEnabledConversion() { @Test public void testSiteDrfDisabledConversion() { converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertEquals("Resource calculator type", DefaultResourceCalculator.class, yarnConvertedConfig.getClass( @@ -156,7 +196,7 @@ public void testSiteDrfDisabledConversion() { @Test public void testAsyncSchedulingEnabledConversion() { converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true, - true); + true, false, null); assertTrue("Asynchronous scheduling", yarnConvertedConfig.getBoolean( CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, @@ -166,10 +206,79 @@ public void testAsyncSchedulingEnabledConversion() { @Test public void testAsyncSchedulingDisabledConversion() { converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, - false); + false, false, null); assertFalse("Asynchronous scheduling", yarnConvertedConfig.getBoolean( CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE)); } + + @Test + public void testSiteQueueAutoDeletionConversionWithWeightMode() { + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, false, null); + 
assertTrue(yarnConvertedConfig.get(YarnConfiguration. + RM_SCHEDULER_ENABLE_MONITORS), true); + assertTrue("Scheduling Policies contain auto deletion policy", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) + .contains(DELETION_POLICY_CLASS)); + + // Test when policy has existed. + yarnConvertedConfig. + set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + "testPolicy"); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, false, null); + assertTrue("Scheduling Policies contain auto deletion policy", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) + .contains(DELETION_POLICY_CLASS)); + + assertEquals("Auto deletion policy expired time should be 10s", + 10, yarnConvertedConfig. + getLong(CapacitySchedulerConfiguration. + AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, + CapacitySchedulerConfiguration. + DEFAULT_AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME)); + } + + @Test + public void + testSiteQueueAutoDeletionConversionDisabledForPercentageMode() { + + // test percentage mode + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, true, null); + assertTrue(yarnConvertedConfig.get(YarnConfiguration. + RM_SCHEDULER_ENABLE_MONITORS), true); + + assertTrue("Scheduling Policies should not" + + "contain auto deletion policy in percentage mode", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) == null || + !yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES). + contains(DELETION_POLICY_CLASS)); + + yarnConvertedConfig. + set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + "testPolicy"); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, true, null); + assertFalse("Scheduling Policies should not " + + "contain auto deletion policy in percentage mode", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) + .contains(DELETION_POLICY_CLASS)); + + assertNotEquals("Auto deletion policy expired time should not " + + "be set in percentage mode", + 10, yarnConvertedConfig. + getLong(CapacitySchedulerConfiguration. + AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, + CapacitySchedulerConfiguration. + DEFAULT_AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME)); + + } } From da68b856a8fbb5ddb15550d731a0724c25e346c4 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Wed, 24 Mar 2021 17:51:35 +0900 Subject: [PATCH 141/666] HDFS-15902. Improve the log for HTTPFS server operation. Contributed by Bhavik Patel. --- .../web/DelegationTokenAuthenticatedURL.java | 6 ++---- .../apache/hadoop/fs/http/server/HttpFSServer.java | 11 ++++------- .../lib/service/hadoop/FileSystemAccessService.java | 10 ++++++---- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java index 07507f9dab212..eb84c63bb319d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java @@ -295,10 +295,8 @@ public HttpURLConnection openConnection(URL url, Token token, String doAs) // delegation token Credentials creds = UserGroupInformation.getCurrentUser(). 
getCredentials(); - if (LOG.isDebugEnabled()) { - LOG.debug("Token not set, looking for delegation token. Creds:{}," - + " size:{}", creds.getAllTokens(), creds.numberOfTokens()); - } + LOG.debug("Token not set, looking for delegation token. Creds:{}," + + " size:{}", creds.getAllTokens(), creds.numberOfTokens()); if (!creds.getAllTokens().isEmpty()) { dToken = selectDelegationToken(url, creds); if (dToken != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 0e7038b635d35..d0d76d6289912 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -285,7 +285,7 @@ public InputStream run() throws Exception { } }); } catch (InterruptedException ie) { - LOG.info("Open interrupted.", ie); + LOG.warn("Open interrupted.", ie); Thread.currentThread().interrupt(); } Long offset = params.get(OffsetParam.NAME, OffsetParam.class); @@ -318,7 +318,7 @@ public InputStream run() throws Exception { enforceRootPath(op.value(), path); FSOperations.FSHomeDir command = new FSOperations.FSHomeDir(); JSONObject json = fsExecute(user, command); - AUDIT_LOG.info(""); + AUDIT_LOG.info("Home Directory for [{}]", user); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } @@ -340,7 +340,7 @@ public InputStream run() throws Exception { FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path); Map json = fsExecute(user, command); - AUDIT_LOG.info("[{}]", path); + AUDIT_LOG.info("Content summary for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } @@ -348,7 +348,7 @@ public InputStream run() throws Exception { FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path); Map json = fsExecute(user, command); - AUDIT_LOG.info("[{}]", path); + AUDIT_LOG.info("Quota Usage for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } @@ -657,14 +657,11 @@ public Response post(InputStream is, break; } case CONCAT: { - System.out.println("HTTPFS SERVER CONCAT"); String sources = params.get(SourcesParam.NAME, SourcesParam.class); - FSOperations.FSConcat command = new FSOperations.FSConcat(path, sources.split(",")); fsExecute(user, command); AUDIT_LOG.info("[{}]", path); - System.out.println("SENT RESPONSE"); response = Response.ok().build(); break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java index 094526a9a00bb..b0c10603172a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java @@ -194,9 +194,11 @@ protected void init() throws ServiceException { throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex); } - LOG.debug("FileSystemAccess FileSystem configuration:"); - for (Map.Entry entry : serviceHadoopConf) { - LOG.debug(" {} = {}", entry.getKey(), entry.getValue()); + if (LOG.isDebugEnabled()) { + 
LOG.debug("FileSystemAccess FileSystem configuration:"); + for (Map.Entry entry : serviceHadoopConf) { + LOG.debug(" {} = {}", entry.getKey(), entry.getValue()); + } } setRequiredServiceHadoopConf(serviceHadoopConf); @@ -262,7 +264,7 @@ public void run() { LOG.warn("Error while purging filesystem, " + ex.toString(), ex); } } - LOG.debug("Purged [{}} filesystem instances", count); + LOG.debug("Purged [{}] filesystem instances", count); } } From 76bd740c3efc3046131771aa464625124aa29002 Mon Sep 17 00:00:00 2001 From: touchida <56789230+touchida@users.noreply.github.com> Date: Wed, 24 Mar 2021 17:56:09 +0900 Subject: [PATCH 142/666] HDFS-15759. EC: Verify EC reconstruction correctness on DataNode (#2585) --- .../rawcoder/DecodingValidator.java | 187 ++++++++++++++ .../rawcoder/InvalidDecodingException.java | 35 +++ .../src/site/markdown/Metrics.md | 1 + .../hadoop/io/erasurecode/TestCoderBase.java | 12 + .../rawcoder/TestDecodingValidator.java | 237 ++++++++++++++++++ .../rawcoder/TestRawCoderBase.java | 2 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 + .../datanode/DataNodeFaultInjector.java | 7 + .../StripedBlockChecksumReconstructor.java | 12 +- .../StripedBlockReconstructor.java | 26 +- .../erasurecode/StripedReconstructor.java | 46 +++- .../datanode/metrics/DataNodeMetrics.java | 10 + .../src/main/resources/hdfs-default.xml | 10 + .../hdfs/TestReconstructStripedFile.java | 24 +- ...stReconstructStripedFileWithValidator.java | 115 +++++++++ 15 files changed, 720 insertions(+), 7 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingValidator.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/InvalidDecodingException.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestDecodingValidator.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingValidator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingValidator.java new file mode 100644 index 0000000000000..9597058b6fe74 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingValidator.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.io.erasurecode.rawcoder; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.io.erasurecode.ECChunk; + +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * A utility class to validate decoding. + */ +@InterfaceAudience.Private +public class DecodingValidator { + + private final RawErasureDecoder decoder; + private ByteBuffer buffer; + private int[] newValidIndexes; + private int newErasedIndex; + + public DecodingValidator(RawErasureDecoder decoder) { + this.decoder = decoder; + } + + /** + * Validate outputs decoded from inputs, by decoding an input back from + * the outputs and comparing it with the original one. + * + * For instance, in RS (6, 3), let (d0, d1, d2, d3, d4, d5) be sources + * and (p0, p1, p2) be parities, and assume + * inputs = [d0, null (d1), d2, d3, d4, d5, null (p0), p1, null (p2)]; + * erasedIndexes = [1, 6]; + * outputs = [d1, p0]. + * Then + * 1. Create new inputs, erasedIndexes and outputs for validation so that + * the inputs could contain the decoded outputs, and decode them: + * newInputs = [d1, d2, d3, d4, d5, p0] + * newErasedIndexes = [0] + * newOutputs = [d0'] + * 2. Compare d0 and d0'. The comparison will fail with high probability + * when the initial outputs are wrong. + * + * Note that the input buffers' positions must be the ones where data are + * read: If the input buffers have been processed by a decoder, the buffers' + * positions must be reset before being passed into this method. + * + * This method does not change outputs and erasedIndexes. + * + * @param inputs input buffers used for decoding. The buffers' position + * are moved to the end after this method. 
+ * @param erasedIndexes indexes of erased units used for decoding + * @param outputs decoded output buffers, which are ready to be read after + * the call + * @throws IOException + */ + public void validate(ByteBuffer[] inputs, int[] erasedIndexes, + ByteBuffer[] outputs) throws IOException { + markBuffers(outputs); + + try { + ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs); + boolean isDirect = validInput.isDirect(); + int capacity = validInput.capacity(); + int remaining = validInput.remaining(); + + // Init buffer + if (buffer == null || buffer.isDirect() != isDirect + || buffer.capacity() < remaining) { + buffer = allocateBuffer(isDirect, capacity); + } + buffer.clear().limit(remaining); + + // Create newInputs and newErasedIndex for validation + ByteBuffer[] newInputs = new ByteBuffer[inputs.length]; + int count = 0; + for (int i = 0; i < erasedIndexes.length; i++) { + newInputs[erasedIndexes[i]] = outputs[i]; + count++; + } + newErasedIndex = -1; + boolean selected = false; + int numValidIndexes = CoderUtil.getValidIndexes(inputs).length; + for (int i = 0; i < newInputs.length; i++) { + if (count == numValidIndexes) { + break; + } else if (!selected && inputs[i] != null) { + newErasedIndex = i; + newInputs[i] = null; + selected = true; + } else if (newInputs[i] == null) { + newInputs[i] = inputs[i]; + if (inputs[i] != null) { + count++; + } + } + } + + // Keep it for testing + newValidIndexes = CoderUtil.getValidIndexes(newInputs); + + decoder.decode(newInputs, new int[]{newErasedIndex}, + new ByteBuffer[]{buffer}); + + if (!buffer.equals(inputs[newErasedIndex])) { + throw new InvalidDecodingException("Failed to validate decoding"); + } + } finally { + toLimits(inputs); + resetBuffers(outputs); + } + } + + /** + * Validate outputs decoded from inputs, by decoding an input back from + * those outputs and comparing it with the original one. 
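For orientation, a minimal caller-side sketch of how this validator is meant to be used; it mirrors the mark/decode/reset/validate sequence that the reconstruction code later in this patch follows. The decoder and buffer set-up are assumed to already exist; the validator calls are the ones defined above.

    // Illustrative sketch only: double-checking a RawErasureDecoder call.
    // "decoder", "inputs", "erasedIndexes" and "outputs" are assumed to be
    // prepared exactly as for a normal decode() call.
    DecodingValidator validator = new DecodingValidator(decoder);

    for (ByteBuffer b : inputs) {        // remember the read positions
      if (b != null) {
        b.mark();
      }
    }
    decoder.decode(inputs, erasedIndexes, outputs);
    for (ByteBuffer b : inputs) {        // rewind so the validator re-reads them
      if (b != null) {
        b.reset();
      }
    }
    // Throws InvalidDecodingException if the decoded outputs are inconsistent
    // with the surviving inputs.
    validator.validate(inputs, erasedIndexes, outputs);
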
+ * @param inputs input buffers used for decoding + * @param erasedIndexes indexes of erased units used for decoding + * @param outputs decoded output buffers + * @throws IOException + */ + public void validate(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs) + throws IOException { + ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs); + ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs); + validate(newInputs, erasedIndexes, newOutputs); + } + + private ByteBuffer allocateBuffer(boolean direct, int capacity) { + if (direct) { + buffer = ByteBuffer.allocateDirect(capacity); + } else { + buffer = ByteBuffer.allocate(capacity); + } + return buffer; + } + + private static void markBuffers(ByteBuffer[] buffers) { + for (ByteBuffer buffer: buffers) { + if (buffer != null) { + buffer.mark(); + } + } + } + + private static void resetBuffers(ByteBuffer[] buffers) { + for (ByteBuffer buffer: buffers) { + if (buffer != null) { + buffer.reset(); + } + } + } + + private static void toLimits(ByteBuffer[] buffers) { + for (ByteBuffer buffer: buffers) { + if (buffer != null) { + buffer.position(buffer.limit()); + } + } + } + + @VisibleForTesting + protected int[] getNewValidIndexes() { + return newValidIndexes; + } + + @VisibleForTesting + protected int getNewErasedIndex() { + return newErasedIndex; + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/InvalidDecodingException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/InvalidDecodingException.java new file mode 100644 index 0000000000000..37869f8eeded0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/InvalidDecodingException.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.io.erasurecode.rawcoder; + +import org.apache.hadoop.classification.InterfaceAudience; + +import java.io.IOException; + +/** + * Thrown for invalid decoding. 
+ */ +@InterfaceAudience.Private +public class InvalidDecodingException + extends IOException { + private static final long serialVersionUID = 0L; + + public InvalidDecodingException(String description) { + super(description); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md index b93a11ce1c9f9..cc80e15ed2ebf 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md @@ -449,6 +449,7 @@ Each metrics record contains tags such as SessionId and Hostname as additional i | `BlocksDeletedInPendingIBR` | Number of blocks at deleted status in pending incremental block report (IBR) | | `EcReconstructionTasks` | Total number of erasure coding reconstruction tasks | | `EcFailedReconstructionTasks` | Total number of erasure coding failed reconstruction tasks | +| `EcInvalidReconstructionTasks` | Total number of erasure coding invalidated reconstruction tasks | | `EcDecodingTimeNanos` | Total number of nanoseconds spent by decoding tasks | | `EcReconstructionBytesRead` | Total number of bytes read by erasure coding worker | | `EcReconstructionBytesWritten` | Total number of bytes written by erasure coding worker | diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java index caab72ceff662..811148464b7cb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java @@ -519,4 +519,16 @@ protected void corruptSomeChunk(ECChunk[] chunks) { buffer.position(buffer.position() + 1); } } + + /** + * Pollute some chunk. + * @param chunks + */ + protected void polluteSomeChunk(ECChunk[] chunks) { + int idx = new Random().nextInt(chunks.length); + ByteBuffer buffer = chunks[idx].getBuffer(); + buffer.mark(); + buffer.put((byte) ((buffer.get(buffer.position()) + 1))); + buffer.reset(); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestDecodingValidator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestDecodingValidator.java new file mode 100644 index 0000000000000..06744cccc0a54 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestDecodingValidator.java @@ -0,0 +1,237 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.io.erasurecode.rawcoder; + +import org.apache.hadoop.io.erasurecode.ECChunk; +import org.apache.hadoop.io.erasurecode.ErasureCodeNative; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.junit.Assert.assertTrue; + +/** + * Test {@link DecodingValidator} under various decoders. + */ +@RunWith(Parameterized.class) +public class TestDecodingValidator extends TestRawCoderBase { + + private DecodingValidator validator; + + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList(new Object[][] { + {RSRawErasureCoderFactory.class, 6, 3, new int[]{1}, new int[]{}}, + {RSRawErasureCoderFactory.class, 6, 3, new int[]{3}, new int[]{0}}, + {RSRawErasureCoderFactory.class, 6, 3, new int[]{2, 4}, new int[]{1}}, + {NativeRSRawErasureCoderFactory.class, 6, 3, new int[]{0}, new int[]{}}, + {XORRawErasureCoderFactory.class, 10, 1, new int[]{0}, new int[]{}}, + {NativeXORRawErasureCoderFactory.class, 10, 1, new int[]{0}, + new int[]{}} + }); + } + + public TestDecodingValidator( + Class factoryClass, int numDataUnits, + int numParityUnits, int[] erasedDataIndexes, int[] erasedParityIndexes) { + this.encoderFactoryClass = factoryClass; + this.decoderFactoryClass = factoryClass; + this.numDataUnits = numDataUnits; + this.numParityUnits = numParityUnits; + this.erasedDataIndexes = erasedDataIndexes; + this.erasedParityIndexes = erasedParityIndexes; + } + + @Before + public void setup() { + if (encoderFactoryClass == NativeRSRawErasureCoderFactory.class + || encoderFactoryClass == NativeXORRawErasureCoderFactory.class) { + Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded()); + } + setAllowDump(false); + } + + /** + * Test if the same validator can process direct and non-direct buffers. + */ + @Test + public void testValidate() { + prepare(null, numDataUnits, numParityUnits, erasedDataIndexes, + erasedParityIndexes); + testValidate(true); + testValidate(false); + } + + /** + * Test if the same validator can process variable width of data for + * inputs and outputs. 
+ */ + protected void testValidate(boolean usingDirectBuffer) { + this.usingDirectBuffer = usingDirectBuffer; + prepareCoders(false); + prepareValidator(false); + + performTestValidate(baseChunkSize); + performTestValidate(baseChunkSize - 17); + performTestValidate(baseChunkSize + 18); + } + + protected void prepareValidator(boolean recreate) { + if (validator == null || recreate) { + validator = new DecodingValidator(decoder); + } + } + + protected void performTestValidate(int chunkSize) { + setChunkSize(chunkSize); + prepareBufferAllocator(false); + + // encode + ECChunk[] dataChunks = prepareDataChunksForEncoding(); + ECChunk[] parityChunks = prepareParityChunksForEncoding(); + ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks); + try { + encoder.encode(dataChunks, parityChunks); + } catch (Exception e) { + Assert.fail("Should not get Exception: " + e.getMessage()); + } + + // decode + backupAndEraseChunks(clonedDataChunks, parityChunks); + ECChunk[] inputChunks = + prepareInputChunksForDecoding(clonedDataChunks, parityChunks); + markChunks(inputChunks); + ensureOnlyLeastRequiredChunks(inputChunks); + ECChunk[] recoveredChunks = prepareOutputChunksForDecoding(); + int[] erasedIndexes = getErasedIndexesForDecoding(); + try { + decoder.decode(inputChunks, erasedIndexes, recoveredChunks); + } catch (Exception e) { + Assert.fail("Should not get Exception: " + e.getMessage()); + } + + // validate + restoreChunksFromMark(inputChunks); + ECChunk[] clonedInputChunks = cloneChunksWithData(inputChunks); + ECChunk[] clonedRecoveredChunks = cloneChunksWithData(recoveredChunks); + int[] clonedErasedIndexes = erasedIndexes.clone(); + + try { + validator.validate(clonedInputChunks, clonedErasedIndexes, + clonedRecoveredChunks); + } catch (Exception e) { + Assert.fail("Should not get Exception: " + e.getMessage()); + } + + // Check if input buffers' positions are moved to the end + verifyBufferPositionAtEnd(clonedInputChunks); + + // Check if validator does not change recovered chunks and erased indexes + verifyChunksEqual(recoveredChunks, clonedRecoveredChunks); + Assert.assertArrayEquals("Erased indexes should not be changed", + erasedIndexes, clonedErasedIndexes); + + // Check if validator uses correct indexes for validation + List validIndexesList = + IntStream.of(CoderUtil.getValidIndexes(inputChunks)).boxed() + .collect(Collectors.toList()); + List newValidIndexesList = + IntStream.of(validator.getNewValidIndexes()).boxed() + .collect(Collectors.toList()); + List erasedIndexesList = + IntStream.of(erasedIndexes).boxed().collect(Collectors.toList()); + int newErasedIndex = validator.getNewErasedIndex(); + Assert.assertTrue( + "Valid indexes for validation should contain" + + " erased indexes for decoding", + newValidIndexesList.containsAll(erasedIndexesList)); + Assert.assertTrue( + "An erased index for validation should be contained" + + " in valid indexes for decoding", + validIndexesList.contains(newErasedIndex)); + Assert.assertFalse( + "An erased index for validation should not be contained" + + " in valid indexes for validation", + newValidIndexesList.contains(newErasedIndex)); + } + + private void verifyChunksEqual(ECChunk[] chunks1, ECChunk[] chunks2) { + boolean result = Arrays.deepEquals(toArrays(chunks1), toArrays(chunks2)); + assertTrue("Recovered chunks should not be changed", result); + } + + /** + * Test if validator throws {@link InvalidDecodingException} when + * a decoded output buffer is polluted. 
+ */ + @Test + public void testValidateWithBadDecoding() throws IOException { + prepare(null, numDataUnits, numParityUnits, erasedDataIndexes, + erasedParityIndexes); + this.usingDirectBuffer = true; + prepareCoders(true); + prepareValidator(true); + prepareBufferAllocator(false); + + // encode + ECChunk[] dataChunks = prepareDataChunksForEncoding(); + ECChunk[] parityChunks = prepareParityChunksForEncoding(); + ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks); + try { + encoder.encode(dataChunks, parityChunks); + } catch (Exception e) { + Assert.fail("Should not get Exception: " + e.getMessage()); + } + + // decode + backupAndEraseChunks(clonedDataChunks, parityChunks); + ECChunk[] inputChunks = + prepareInputChunksForDecoding(clonedDataChunks, parityChunks); + markChunks(inputChunks); + ensureOnlyLeastRequiredChunks(inputChunks); + ECChunk[] recoveredChunks = prepareOutputChunksForDecoding(); + int[] erasedIndexes = getErasedIndexesForDecoding(); + try { + decoder.decode(inputChunks, erasedIndexes, recoveredChunks); + } catch (Exception e) { + Assert.fail("Should not get Exception: " + e.getMessage()); + } + + // validate + restoreChunksFromMark(inputChunks); + polluteSomeChunk(recoveredChunks); + try { + validator.validate(inputChunks, erasedIndexes, recoveredChunks); + Assert.fail("Validation should fail due to bad decoding"); + } catch (InvalidDecodingException e) { + String expected = "Failed to validate decoding"; + GenericTestUtils.assertExceptionContains(expected, e); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java index 4519e357bd181..eb63494507eaf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java @@ -334,7 +334,7 @@ protected void testInputPosition(boolean usingDirectBuffer) { verifyBufferPositionAtEnd(inputChunks); } - private void verifyBufferPositionAtEnd(ECChunk[] inputChunks) { + void verifyBufferPositionAtEnd(ECChunk[] inputChunks) { for (ECChunk chunk : inputChunks) { if (chunk != null) { Assert.assertEquals(0, chunk.getBuffer().remaining()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index c29d91c421af4..b2c94544811d4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -867,6 +867,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.datanode.ec.reconstruction.xmits.weight"; public static final float DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_DEFAULT = 0.5f; + public static final String DFS_DN_EC_RECONSTRUCTION_VALIDATION_KEY = + "dfs.datanode.ec.reconstruction.validation"; + public static final boolean DFS_DN_EC_RECONSTRUCTION_VALIDATION_VALUE = false; public static final String DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java index 58c589e4d2554..e3b9cf0399c55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java @@ -23,6 +23,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.ByteBuffer; /** * Used for injecting faults in DFSClient and DFSOutputStream tests. @@ -142,4 +143,10 @@ public void blockUtilSendFullBlockReport() {} * Just delay a while. */ public void delay() {} + + /** + * Used as a hook to inject data pollution + * into an erasure coding reconstruction. + */ + public void badDecoding(ByteBuffer[] outputs) {} } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java index e28d6c556b803..a196935219ec5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java @@ -57,6 +57,7 @@ protected StripedBlockChecksumReconstructor(ErasureCodingWorker worker, private void init() throws IOException { initDecoderIfNecessary(); + initDecodingValidatorIfNecessary(); getStripedReader().init(); // allocate buffer to keep the reconstructed block data targetBuffer = allocateBuffer(getBufferSize()); @@ -192,7 +193,16 @@ private void reconstructTargets(int toReconstructLen) throws IOException { for (int i = 0; i < targetIndices.length; i++) { tarIndices[i] = targetIndices[i]; } - getDecoder().decode(inputs, tarIndices, outputs); + + if (isValidationEnabled()) { + markBuffers(inputs); + getDecoder().decode(inputs, tarIndices, outputs); + resetBuffers(inputs); + + getValidator().validate(inputs, tarIndices, outputs); + } else { + getDecoder().decode(inputs, tarIndices, outputs); + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java index 1af2380886ac3..cd59f515283d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; +import org.apache.hadoop.io.erasurecode.rawcoder.InvalidDecodingException; import org.apache.hadoop.util.Time; /** @@ -53,6 +54,8 @@ public void run() { try { initDecoderIfNecessary(); + initDecodingValidatorIfNecessary(); + getStripedReader().init(); stripedWriter.init(); @@ -126,12 +129,31 @@ private void reconstructTargets(int toReconstructLen) throws IOException { int[] erasedIndices = stripedWriter.getRealTargetIndices(); ByteBuffer[] outputs = 
stripedWriter.getRealTargetBuffers(toReconstructLen); + if (isValidationEnabled()) { + markBuffers(inputs); + decode(inputs, erasedIndices, outputs); + resetBuffers(inputs); + + DataNodeFaultInjector.get().badDecoding(outputs); + try { + getValidator().validate(inputs, erasedIndices, outputs); + } catch (InvalidDecodingException e) { + getDatanode().getMetrics().incrECInvalidReconstructionTasks(); + throw e; + } + } else { + decode(inputs, erasedIndices, outputs); + } + + stripedWriter.updateRealTargetBuffers(toReconstructLen); + } + + private void decode(ByteBuffer[] inputs, int[] erasedIndices, + ByteBuffer[] outputs) throws IOException { long start = System.nanoTime(); getDecoder().decode(inputs, erasedIndices, outputs); long end = System.nanoTime(); this.getDatanode().getMetrics().incrECDecodingTime(end - start); - - stripedWriter.updateRealTargetBuffers(toReconstructLen); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java index 851f695662082..06346c6b32467 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.io.erasurecode.rawcoder.DecodingValidator; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -103,10 +105,14 @@ abstract class StripedReconstructor { private final Configuration conf; private final DataNode datanode; private final ErasureCodingPolicy ecPolicy; + private final ErasureCoderOptions coderOptions; private RawErasureDecoder decoder; private final ExtendedBlock blockGroup; private static final ByteBufferPool BUFFER_POOL = new ElasticByteBufferPool(); + private final boolean isValidationEnabled; + private DecodingValidator validator; + // position in striped internal block private long positionInBlock; private StripedReader stripedReader; @@ -136,6 +142,13 @@ abstract class StripedReconstructor { cachingStrategy = CachingStrategy.newDefaultStrategy(); positionInBlock = 0L; + + coderOptions = new ErasureCoderOptions( + ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits()); + isValidationEnabled = conf.getBoolean( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_VALIDATION_KEY, + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_VALIDATION_VALUE) + && !coderOptions.allowChangeInputs(); } public void incrBytesRead(boolean local, long delta) { @@ -196,13 +209,18 @@ long getBlockLen(int i) { // Initialize decoder protected void initDecoderIfNecessary() { if (decoder == null) { - ErasureCoderOptions coderOptions = new ErasureCoderOptions( - ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits()); decoder = CodecUtil.createRawDecoder(conf, ecPolicy.getCodecName(), coderOptions); } } + // Initialize decoding validator + protected void initDecodingValidatorIfNecessary() { + if (isValidationEnabled && validator == null) { + validator = new DecodingValidator(decoder); + } + } + long getPositionInBlock() { return positionInBlock; } @@ -285,4 +303,28 @@ public ErasureCodingWorker 
getErasureCodingWorker() { static ByteBufferPool getBufferPool() { return BUFFER_POOL; } + + boolean isValidationEnabled() { + return isValidationEnabled; + } + + DecodingValidator getValidator() { + return validator; + } + + protected static void markBuffers(ByteBuffer[] buffers) { + for (ByteBuffer buffer: buffers) { + if (buffer != null) { + buffer.mark(); + } + } + } + + protected static void resetBuffers(ByteBuffer[] buffers) { + for (ByteBuffer buffer: buffers) { + if (buffer != null) { + buffer.reset(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index 712d8f46ef7b0..2debc3ee0a7e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -152,6 +152,8 @@ public class DataNodeMetrics { MutableCounterLong ecReconstructionTasks; @Metric("Count of erasure coding failed reconstruction tasks") MutableCounterLong ecFailedReconstructionTasks; + @Metric("Count of erasure coding invalidated reconstruction tasks") + private MutableCounterLong ecInvalidReconstructionTasks; @Metric("Nanoseconds spent by decoding tasks") MutableCounterLong ecDecodingTimeNanos; @Metric("Bytes read by erasure coding worker") @@ -543,6 +545,14 @@ public void incrECFailedReconstructionTasks() { ecFailedReconstructionTasks.incr(); } + public void incrECInvalidReconstructionTasks() { + ecInvalidReconstructionTasks.incr(); + } + + public long getECInvalidReconstructionTasks() { + return ecInvalidReconstructionTasks.value(); + } + public void incrDataNodeActiveXceiversCount() { dataNodeActiveXceiversCount.incr(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index c3080581de9e3..df4df489bfabb 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3693,6 +3693,16 @@
+ + dfs.datanode.ec.reconstruction.validation + false + + Decide if datanode validates that EC reconstruction tasks reconstruct + target blocks correctly. When validation fails, reconstruction tasks + will fail and be retried by namenode. + + + dfs.namenode.quota.init-threads 4 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 902b899277330..67df6d8a8dbe6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -107,6 +107,23 @@ public ErasureCodingPolicy getEcPolicy() { return StripedFileTestUtil.getDefaultECPolicy(); } + public boolean isValidationEnabled() { + return false; + } + + public int getPendingTimeout() { + return DFSConfigKeys + .DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT; + } + + public int getBlockSize() { + return blockSize; + } + + public MiniDFSCluster getCluster() { + return cluster; + } + @Before public void setup() throws IOException { ecPolicy = getEcPolicy(); @@ -128,6 +145,11 @@ public void setup() throws IOException { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } + conf.setInt( + DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, + getPendingTimeout()); + conf.setBoolean(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_VALIDATION_KEY, + isValidationEnabled()); File basedir = new File(GenericTestUtils.getRandomizedTempPath()); cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(dnNum) .build(); @@ -303,7 +325,7 @@ private static void writeFile(DistributedFileSystem fs, String fileName, * and verify the block replica length, generationStamp and content. * 2. Read the file and verify content. */ - private void assertFileBlocksReconstruction(String fileName, int fileLen, + void assertFileBlocksReconstruction(String fileName, int fileLen, ReconstructionType type, int toRecoverBlockNum) throws Exception { if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) { Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java new file mode 100644 index 0000000000000..00749efa4d0a4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; +import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This test extends {@link TestReconstructStripedFile} to test + * ec reconstruction validation. + */ +public class TestReconstructStripedFileWithValidator + extends TestReconstructStripedFile { + private static final Logger LOG = + LoggerFactory.getLogger(TestReconstructStripedFileWithValidator.class); + + public TestReconstructStripedFileWithValidator() { + LOG.info("run {} with validator.", + TestReconstructStripedFileWithValidator.class.getSuperclass() + .getSimpleName()); + } + + /** + * This test injects data pollution into decoded outputs once. + * When validation enabled, the first reconstruction task should fail + * in the validation, but the data will be recovered correctly + * by the next task. + * On the other hand, when validation disabled, the first reconstruction task + * will succeed and then lead to data corruption. + */ + @Test(timeout = 120000) + public void testValidatorWithBadDecoding() + throws Exception { + MiniDFSCluster cluster = getCluster(); + + cluster.getDataNodes().stream() + .map(DataNode::getMetrics) + .map(DataNodeMetrics::getECInvalidReconstructionTasks) + .forEach(n -> Assert.assertEquals(0, (long) n)); + + DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get(); + DataNodeFaultInjector badDecodingInjector = new DataNodeFaultInjector() { + private final AtomicBoolean flag = new AtomicBoolean(false); + + @Override + public void badDecoding(ByteBuffer[] outputs) { + if (!flag.get()) { + for (ByteBuffer output : outputs) { + output.mark(); + output.put((byte) (output.get(output.position()) + 1)); + output.reset(); + } + } + flag.set(true); + } + }; + DataNodeFaultInjector.set(badDecodingInjector); + + int fileLen = + (getEcPolicy().getNumDataUnits() + getEcPolicy().getNumParityUnits()) + * getBlockSize() + getBlockSize() / 10; + try { + assertFileBlocksReconstruction( + "/testValidatorWithBadDecoding", + fileLen, + ReconstructionType.DataOnly, + getEcPolicy().getNumParityUnits()); + + long sum = cluster.getDataNodes().stream() + .map(DataNode::getMetrics) + .mapToLong(DataNodeMetrics::getECInvalidReconstructionTasks) + .sum(); + Assert.assertEquals(1, sum); + } finally { + DataNodeFaultInjector.set(oldInjector); + } + } + + @Override + public boolean isValidationEnabled() { + return true; + } + + /** + * Set a small value for the failed reconstruction task to be + * rescheduled in a short period of time. + */ + @Override + public int getPendingTimeout() { + return 10; + } +} From ad15aaf7bfc3d72dd7a0cdb007c5e68877ded3d3 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 24 Mar 2021 13:32:54 +0000 Subject: [PATCH 143/666] HADOOP-13551. AWS metrics wire-up (#2778) Moves to the builder API for AWS S3 client creation, and offers a similar style of API to the S3A FileSystem and tests, hiding the details of which options are client, which are in AWS Conf, and doing the wiring up of S3A statistics interfaces to the AWS SDK internals. 
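As a rough sketch only (not part of the change itself), the new style looks like the following; the S3ClientCreationParameters type and its with* setters are the ones added later in this diff, while conf, credentials, uaSuffix, statisticsContext and fsUri are hypothetical stand-ins for state the calling filesystem already holds:

    // Illustrative only: assemble the parameter object and ask the configured
    // factory for a client; mirrors the wiring S3AFileSystem now performs.
    S3ClientFactory.S3ClientCreationParameters parameters =
        new S3ClientFactory.S3ClientCreationParameters()
            .withCredentialSet(credentials)                 // AWSCredentialsProvider chain
            .withEndpoint(conf.getTrimmed(ENDPOINT, DEFAULT_ENDPOINT))
            .withMetrics(statisticsContext.newStatisticsFromAwsSdk())
            .withPathStyleAccess(conf.getBoolean(PATH_STYLE_ACCESS, false))
            .withUserAgentSuffix(uaSuffix);
    AmazonS3 s3 = ReflectionUtils.newInstance(
        conf.getClass(S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL,
            S3ClientFactory.class), conf)
        .createS3Client(fsUri, parameters);

Adding a new option later only needs another with* setter on the parameter class, so external factory implementations (such as HBase's HBoss) keep compiling against the same createS3Client(URI, parameters) signature.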
S3A Statistics, including IOStatistics, should now count throttling events handled in the AWS SDK itself. This patch restores endpoint determination by probes to US-East-1 if the client isn't configured with fs.s3a.endpoint. Explicitly setting the endpoint will save the cost of these probe HTTP requests. Contributed by Steve Loughran. --- .../org/apache/hadoop/fs/s3a/Constants.java | 27 +- .../hadoop/fs/s3a/DefaultS3ClientFactory.java | 169 ++++--------- .../fs/s3a/InconsistentS3ClientFactory.java | 30 +-- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 26 +- .../apache/hadoop/fs/s3a/S3ClientFactory.java | 232 +++++++++++++++++- .../hadoop/fs/s3a/impl/InternalConstants.java | 6 - .../hadoop/fs/s3a/MockS3ClientFactory.java | 11 +- .../hadoop/fs/s3a/auth/ITestCustomSigner.java | 78 ++++-- .../ITestSessionDelegationInFileystem.java | 24 +- .../fs/s3a/commit/ITestCommitOperations.java | 1 + .../ITestAWSStatisticCollection.java | 82 +++++++ 11 files changed, 466 insertions(+), 220 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index d14a82e5c304c..c4b8f6e3c4683 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -160,14 +160,33 @@ private Constants() { DEFAULT_SSL_CHANNEL_MODE = DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE; - //use a custom endpoint? + /** + * Endpoint. For v4 signing and/or better performance, + * this should be the specific endpoint of the region + * in which the bucket is hosted. + */ public static final String ENDPOINT = "fs.s3a.endpoint"; /** - * Default value of s3 endpoint. If not set explicitly using - * {@code AmazonS3#setEndpoint()}, this is used. + * Default value of s3 endpoint: {@value}. + * It tells the AWS client to work it out by asking the central + * endpoint where the bucket lives; caching that + * value in the client for the life of the process. + *
<p>
+ * Note: previously this constant was defined as + * {@link #CENTRAL_ENDPOINT}, however the actual + * S3A client code used "" as the default when + * {@link #ENDPOINT} was unset. + * As core-default.xml also set the endpoint to "", + * the empty string has long been the real + * default value. + */ + public static final String DEFAULT_ENDPOINT = ""; + + /** + * The central endpoint :{@value}. */ - public static final String DEFAULT_ENDPOINT = "s3.amazonaws.com"; + public static final String CENTRAL_ENDPOINT = "s3.amazonaws.com"; //Enable path style access? Overrides default virtual hosting public static final String PATH_STYLE_ACCESS = "fs.s3a.path.style.access"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java index 96d16e8b1b8e0..ae50bd1459bcd 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java @@ -22,9 +22,8 @@ import java.net.URI; import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.metrics.RequestMetricCollector; +import com.amazonaws.handlers.RequestHandler2; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.AmazonS3ClientBuilder; @@ -41,18 +40,15 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING; -import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT; -import static org.apache.hadoop.fs.s3a.Constants.PATH_STYLE_ACCESS; /** * The default {@link S3ClientFactory} implementation. * This calls the AWS SDK to configure and create an - * {@link AmazonS3Client} that communicates with the S3 service. + * {@code AmazonS3Client} that communicates with the S3 service. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -60,8 +56,6 @@ public class DefaultS3ClientFactory extends Configured implements S3ClientFactory { private static final String S3_SERVICE_NAME = "s3"; - private static final String S3_SIGNER = "S3SignerType"; - private static final String S3_V4_SIGNER = "AWSS3V4SignerType"; /** * Subclasses refer to this. @@ -70,22 +64,21 @@ public class DefaultS3ClientFactory extends Configured LoggerFactory.getLogger(DefaultS3ClientFactory.class); /** - * Create the client. - *
<p>
- * If the AWS stats are not null then a {@link AwsStatisticsCollector}. - * is created to bind to the two. - * Important: until this binding works properly across regions, - * this should be null. + * Create the client by preparing the AwsConf configuration + * and then invoking {@code buildAmazonS3Client()}. */ @Override - public AmazonS3 createS3Client(URI name, - final String bucket, - final AWSCredentialsProvider credentials, - final String userAgentSuffix, - final StatisticsFromAwsSdk statisticsFromAwsSdk) throws IOException { + public AmazonS3 createS3Client( + final URI uri, + final S3ClientCreationParameters parameters) throws IOException { Configuration conf = getConf(); final ClientConfiguration awsConf = S3AUtils - .createAwsConf(conf, bucket, Constants.AWS_SERVICE_IDENTIFIER_S3); + .createAwsConf(conf, + uri.getHost(), + Constants.AWS_SERVICE_IDENTIFIER_S3); + // add any headers + parameters.getHeaders().forEach((h, v) -> + awsConf.addHeader(h, v)); // When EXPERIMENTAL_AWS_INTERNAL_THROTTLING is false // throttling is explicitly disabled on the S3 client so that @@ -96,111 +89,62 @@ public AmazonS3 createS3Client(URI name, conf.getBoolean(EXPERIMENTAL_AWS_INTERNAL_THROTTLING, EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT)); - if (!StringUtils.isEmpty(userAgentSuffix)) { - awsConf.setUserAgentSuffix(userAgentSuffix); + if (!StringUtils.isEmpty(parameters.getUserAgentSuffix())) { + awsConf.setUserAgentSuffix(parameters.getUserAgentSuffix()); } - // optional metrics - RequestMetricCollector metrics = statisticsFromAwsSdk != null - ? new AwsStatisticsCollector(statisticsFromAwsSdk) - : null; - return newAmazonS3Client( - credentials, + return buildAmazonS3Client( awsConf, - metrics, - conf.getTrimmed(ENDPOINT, ""), - conf.getBoolean(PATH_STYLE_ACCESS, false)); - } - - /** - * Create an {@link AmazonS3} client. - * Override this to provide an extended version of the client - * @param credentials credentials to use - * @param awsConf AWS configuration - * @param metrics metrics collector or null - * @param endpoint endpoint string; may be "" - * @param pathStyleAccess enable path style access? - * @return new AmazonS3 client - */ - protected AmazonS3 newAmazonS3Client( - final AWSCredentialsProvider credentials, - final ClientConfiguration awsConf, - final RequestMetricCollector metrics, - final String endpoint, - final boolean pathStyleAccess) { - if (metrics != null) { - LOG.debug("Building S3 client using the SDK builder API"); - return buildAmazonS3Client(credentials, awsConf, metrics, endpoint, - pathStyleAccess); - } else { - LOG.debug("Building S3 client using the SDK builder API"); - return classicAmazonS3Client(credentials, awsConf, endpoint, - pathStyleAccess); - } + parameters); } /** - * Use the (newer) Builder SDK to create a an AWS S3 client. + * Use the Builder API to create an AWS S3 client. *
<p>
- * This has a more complex endpoint configuration in a - * way which does not yet work in this code in a way - * which doesn't trigger regressions. So it is only used - * when SDK metrics are supplied. - * @param credentials credentials to use + * This has a more complex endpoint configuration mechanism + * which initially caused problems; the + * {@code withForceGlobalBucketAccessEnabled(true)} + * command is critical here. * @param awsConf AWS configuration - * @param metrics metrics collector or null - * @param endpoint endpoint string; may be "" - * @param pathStyleAccess enable path style access? + * @param parameters parameters * @return new AmazonS3 client */ - private AmazonS3 buildAmazonS3Client( - final AWSCredentialsProvider credentials, + protected AmazonS3 buildAmazonS3Client( final ClientConfiguration awsConf, - final RequestMetricCollector metrics, - final String endpoint, - final boolean pathStyleAccess) { + final S3ClientCreationParameters parameters) { AmazonS3ClientBuilder b = AmazonS3Client.builder(); - b.withCredentials(credentials); + b.withCredentials(parameters.getCredentialSet()); b.withClientConfiguration(awsConf); - b.withPathStyleAccessEnabled(pathStyleAccess); - if (metrics != null) { - b.withMetricsCollector(metrics); + b.withPathStyleAccessEnabled(parameters.isPathStyleAccess()); + + if (parameters.getMetrics() != null) { + b.withMetricsCollector( + new AwsStatisticsCollector(parameters.getMetrics())); + } + if (parameters.getRequestHandlers() != null) { + b.withRequestHandlers( + parameters.getRequestHandlers().toArray(new RequestHandler2[0])); + } + if (parameters.getMonitoringListener() != null) { + b.withMonitoringListener(parameters.getMonitoringListener()); } // endpoint set up is a PITA - // client.setEndpoint("") is no longer available AwsClientBuilder.EndpointConfiguration epr - = createEndpointConfiguration(endpoint, awsConf); + = createEndpointConfiguration(parameters.getEndpoint(), + awsConf); if (epr != null) { // an endpoint binding was constructed: use it. b.withEndpointConfiguration(epr); + } else { + // no idea what the endpoint is, so tell the SDK + // to work it out at the cost of an extra HEAD request + b.withForceGlobalBucketAccessEnabled(true); } final AmazonS3 client = b.build(); return client; } - /** - * Wrapper around constructor for {@link AmazonS3} client. - * Override this to provide an extended version of the client. - *
<p>
- * This uses a deprecated constructor -it is currently - * the only one which works for us. - * @param credentials credentials to use - * @param awsConf AWS configuration - * @param endpoint endpoint string; may be "" - * @param pathStyleAccess enable path style access? - * @return new AmazonS3 client - */ - @SuppressWarnings("deprecation") - private AmazonS3 classicAmazonS3Client( - AWSCredentialsProvider credentials, - ClientConfiguration awsConf, - final String endpoint, - final boolean pathStyleAccess) { - final AmazonS3 client = new AmazonS3Client(credentials, awsConf); - return configureAmazonS3Client(client, endpoint, pathStyleAccess); - } - /** * Configure classic S3 client. *
<p>
@@ -226,31 +170,6 @@ protected static AmazonS3 configureAmazonS3Client(AmazonS3 s3, throw new IllegalArgumentException(msg, e); } } - return applyS3ClientOptions(s3, pathStyleAccess); - } - - /** - * Perform any tuning of the {@code S3ClientOptions} settings based on - * the Hadoop configuration. - * This is different from the general AWS configuration creation as - * it is unique to S3 connections. - *
<p>
- * The {@link Constants#PATH_STYLE_ACCESS} option enables path-style access - * to S3 buckets if configured. By default, the - * behavior is to use virtual hosted-style access with URIs of the form - * {@code http://bucketname.s3.amazonaws.com} - *
<p>
- * Enabling path-style access and a - * region-specific endpoint switches the behavior to use URIs of the form - * {@code http://s3-eu-west-1.amazonaws.com/bucketname}. - * It is common to use this when connecting to private S3 servers, as it - * avoids the need to play with DNS entries. - * @param s3 S3 client - * @param pathStyleAccess enable path style access? - * @return the S3 client - */ - protected static AmazonS3 applyS3ClientOptions(AmazonS3 s3, - final boolean pathStyleAccess) { if (pathStyleAccess) { LOG.debug("Enabling path style access!"); s3.setS3ClientOptions(S3ClientOptions.builder() diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java index ddc492235dba8..c11581f1d5d78 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java @@ -19,8 +19,6 @@ package org.apache.hadoop.fs.s3a; import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.metrics.RequestMetricCollector; import com.amazonaws.services.s3.AmazonS3; import org.apache.hadoop.classification.InterfaceAudience; @@ -31,31 +29,25 @@ * This client is for testing only; it is in the production * {@code hadoop-aws} module to enable integration tests to use this * just by editing the Hadoop configuration used to bring up the client. + * + * The factory uses the older constructor-based instantiation/configuration + * of the client, so does not wire up metrics, handlers etc. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class InconsistentS3ClientFactory extends DefaultS3ClientFactory { - /** - * Create the inconsistent client. - * Logs a warning that this is being done. - * @param credentials credentials to use - * @param awsConf AWS configuration - * @param metrics metric collector - * @param endpoint AWS endpoint - * @param pathStyleAccess should path style access be supported? - * @return an inconsistent client. - */ @Override - protected AmazonS3 newAmazonS3Client(AWSCredentialsProvider credentials, - ClientConfiguration awsConf, - final RequestMetricCollector metrics, - final String endpoint, - final boolean pathStyleAccess) { + protected AmazonS3 buildAmazonS3Client( + final ClientConfiguration awsConf, + final S3ClientCreationParameters parameters) { LOG.warn("** FAILURE INJECTION ENABLED. Do not run in production! 
**"); InconsistentAmazonS3Client s3 - = new InconsistentAmazonS3Client(credentials, awsConf, getConf()); - configureAmazonS3Client(s3, endpoint, pathStyleAccess); + = new InconsistentAmazonS3Client( + parameters.getCredentialSet(), awsConf, getConf()); + configureAmazonS3Client(s3, + parameters.getEndpoint(), + parameters.isPathStyleAccess()); return s3; } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index f625346957f4a..8db5d51def84e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -69,7 +69,6 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; - import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; import com.amazonaws.services.s3.model.SSECustomerKey; import com.amazonaws.services.s3.model.UploadPartRequest; @@ -83,7 +82,6 @@ import com.amazonaws.event.ProgressListener; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -166,7 +164,6 @@ import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; -import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; import org.apache.hadoop.fs.s3a.statistics.impl.BondedS3AStatisticsContext; import org.apache.hadoop.fs.s3native.S3xLoginHelper; import org.apache.hadoop.io.retry.RetryPolicies; @@ -198,7 +195,6 @@ import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletionIgnoringExceptions; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket; -import static org.apache.hadoop.fs.s3a.impl.InternalConstants.AWS_SDK_METRICS_ENABLED; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404; import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.logDnsLookup; @@ -376,6 +372,11 @@ public void initialize(URI name, Configuration originalConf) LOG.debug("Initializing S3AFileSystem for {}", bucket); // clone the configuration into one with propagated bucket options Configuration conf = propagateBucketOptions(originalConf, bucket); + // fix up the classloader of the configuration to be whatever + // classloader loaded this filesystem. + // See: HADOOP-17372 + conf.setClassLoader(this.getClass().getClassLoader()); + // patch the Hadoop security providers patchSecurityCredentialProviders(conf); // look for delegation token support early. @@ -740,16 +741,17 @@ private void bindAWSClient(URI name, boolean dtEnabled) throws IOException { S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL, S3ClientFactory.class); - StatisticsFromAwsSdk awsStats = null; - // TODO: HADOOP-16830 when the S3 client building code works - // with different regions, - // then non-null stats can be passed in here. 
- if (AWS_SDK_METRICS_ENABLED) { - awsStats = statisticsContext.newStatisticsFromAwsSdk(); - } + S3ClientFactory.S3ClientCreationParameters parameters = null; + parameters = new S3ClientFactory.S3ClientCreationParameters() + .withCredentialSet(credentials) + .withEndpoint(conf.getTrimmed(ENDPOINT, DEFAULT_ENDPOINT)) + .withMetrics(statisticsContext.newStatisticsFromAwsSdk()) + .withPathStyleAccess(conf.getBoolean(PATH_STYLE_ACCESS, false)) + .withUserAgentSuffix(uaSuffix); s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf) - .createS3Client(getUri(), bucket, credentials, uaSuffix, awsStats); + .createS3Client(getUri(), + parameters); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java index e04d3b5cbd269..dbb39fb662408 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java @@ -18,38 +18,246 @@ package org.apache.hadoop.fs.s3a; +import javax.annotation.Nullable; import java.io.IOException; import java.net.URI; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.monitoring.MonitoringListener; import com.amazonaws.services.s3.AmazonS3; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_ENDPOINT; + /** * Factory for creation of {@link AmazonS3} client instances. + * Important: HBase's HBoss module implements this interface in its + * tests. + * Take care when updating this interface to ensure that a client + * implementing only the deprecated method will work. + * See https://github.com/apache/hbase-filesystem + * */ -@InterfaceAudience.Private -@InterfaceStability.Unstable +@InterfaceAudience.LimitedPrivate("HBoss") +@InterfaceStability.Evolving public interface S3ClientFactory { /** * Creates a new {@link AmazonS3} client. * - * @param name raw input S3A file system URI - * @param bucket Optional bucket to use to look up per-bucket proxy secrets - * @param credentialSet credentials to use - * @param userAgentSuffix optional suffix for the UA field. - * @param statisticsFromAwsSdk binding for AWS stats - may be null + * @param uri S3A file system URI + * @param parameters parameter object * @return S3 client * @throws IOException IO problem */ - AmazonS3 createS3Client(URI name, - String bucket, - AWSCredentialsProvider credentialSet, - String userAgentSuffix, - StatisticsFromAwsSdk statisticsFromAwsSdk) throws IOException; + AmazonS3 createS3Client(URI uri, + S3ClientCreationParameters parameters) throws IOException; + + /** + * Settings for the S3 Client. + * Implemented as a class to pass in so that adding + * new parameters does not break the binding of + * external implementations of the factory. + */ + final class S3ClientCreationParameters { + + /** + * Credentials. + */ + private AWSCredentialsProvider credentialSet; + + /** + * Endpoint. + */ + private String endpoint = DEFAULT_ENDPOINT; + + /** + * Custom Headers. + */ + private final Map headers = new HashMap<>(); + + /** + * Monitoring listener. 
+ */ + private MonitoringListener monitoringListener; + + /** + * RequestMetricCollector metrics...if not-null will be wrapped + * with an {@code AwsStatisticsCollector} and passed to + * the client. + */ + private StatisticsFromAwsSdk metrics; + + /** + * Use (deprecated) path style access. + */ + private boolean pathStyleAccess; + + /** + * This is in the settings awaiting wiring up and testing. + */ + private boolean requesterPays; + + /** + * Request handlers; used for auditing, X-Ray etc. + */ + private List requestHandlers; + + /** + * Suffix to UA. + */ + private String userAgentSuffix = ""; + + public List getRequestHandlers() { + return requestHandlers; + } + + /** + * List of request handlers. + * @param handlers handler list. + * @return this object + */ + public S3ClientCreationParameters withRequestHandlers( + @Nullable final List handlers) { + requestHandlers = handlers; + return this; + } + + public MonitoringListener getMonitoringListener() { + return monitoringListener; + } + + /** + * listener for AWS monitoring events. + * @param listener listener + * @return this object + */ + public S3ClientCreationParameters withMonitoringListener( + @Nullable final MonitoringListener listener) { + monitoringListener = listener; + return this; + } + + public StatisticsFromAwsSdk getMetrics() { + return metrics; + } + + /** + * Metrics binding. This is the S3A-level + * statistics interface, which will be wired + * up to the AWS callbacks. + * @param statistics statistics implementation + * @return this object + */ + public S3ClientCreationParameters withMetrics( + @Nullable final StatisticsFromAwsSdk statistics) { + metrics = statistics; + return this; + } + + /** + * Requester pays option. Not yet wired up. + * @param value new value + * @return the builder + */ + public S3ClientCreationParameters withRequesterPays( + final boolean value) { + requesterPays = value; + return this; + } + + public boolean isRequesterPays() { + return requesterPays; + } + + public AWSCredentialsProvider getCredentialSet() { + return credentialSet; + } + + /** + * Set credentials. + * @param value new value + * @return the builder + */ + + public S3ClientCreationParameters withCredentialSet( + final AWSCredentialsProvider value) { + credentialSet = value; + return this; + } + + public String getUserAgentSuffix() { + return userAgentSuffix; + } + + /** + * Set UA suffix. + * @param value new value + * @return the builder + */ + + public S3ClientCreationParameters withUserAgentSuffix( + final String value) { + userAgentSuffix = value; + return this; + } + + public String getEndpoint() { + return endpoint; + } + + /** + * Set endpoint. + * @param value new value + * @return the builder + */ + + public S3ClientCreationParameters withEndpoint( + final String value) { + endpoint = value; + return this; + } + + public boolean isPathStyleAccess() { + return pathStyleAccess; + } + + /** + * Set path access option. + * @param value new value + * @return the builder + */ + public S3ClientCreationParameters withPathStyleAccess( + final boolean value) { + pathStyleAccess = value; + return this; + } + + /** + * Add a custom header. + * @param header header name + * @param value new value + * @return the builder + */ + public S3ClientCreationParameters withHeader( + String header, String value) { + headers.put(header, value); + return this; + } + /** + * Get the map of headers. 
+ * @return (mutable) header map + */ + public Map getHeaders() { + return headers; + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java index 358ec261ccaae..a5ce1f68ad3fc 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java @@ -111,10 +111,4 @@ private InternalConstants() { */ public static final int DEFAULT_UPLOAD_PART_COUNT_LIMIT = 10000; - /** - * Flag to enable AWS Statistics binding. As this is triggering - * problems related to region/endpoint setup, it is currently - * disabled. - */ - public static final boolean AWS_SDK_METRICS_ENABLED = true; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java index 868ec2c36cd03..bd121ba2728eb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java @@ -23,13 +23,10 @@ import java.net.URI; import java.util.ArrayList; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.MultipartUploadListing; import com.amazonaws.services.s3.model.Region; -import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; - /** * An {@link S3ClientFactory} that returns Mockito mocks of the {@link AmazonS3} * interface suitable for unit testing. @@ -37,12 +34,10 @@ public class MockS3ClientFactory implements S3ClientFactory { @Override - public AmazonS3 createS3Client(URI name, - final String bucket, - final AWSCredentialsProvider credentialSet, - final String userAgentSuffix, - final StatisticsFromAwsSdk statisticsFromAwsSdks) { + public AmazonS3 createS3Client(URI uri, + final S3ClientCreationParameters parameters) { AmazonS3 s3 = mock(AmazonS3.class); + String bucket = uri.getHost(); when(s3.doesBucketExist(bucket)).thenReturn(true); when(s3.doesBucketExistV2(bucket)).thenReturn(true); // this listing is used in startup if purging is enabled, so diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java index d9cb1d97bf5fc..72af1752b1253 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java @@ -23,12 +23,11 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import com.amazonaws.SignableRequest; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.Signer; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.AWSS3V4Signer; import org.assertj.core.api.Assertions; import org.junit.Test; @@ -40,14 +39,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; -import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider; +import org.apache.hadoop.fs.s3a.Constants; +import 
org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.auth.ITestCustomSigner.CustomSignerInitializer.StoreValue; import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider; import org.apache.hadoop.security.UserGroupInformation; import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS; import static org.apache.hadoop.fs.s3a.Constants.SIGNING_ALGORITHM_S3; -import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; /** * Tests for custom Signers and SignerInitializers. @@ -62,23 +62,32 @@ public class ITestCustomSigner extends AbstractS3ATestBase { private String regionName; + private String endpoint; + @Override public void setup() throws Exception { super.setup(); - regionName = determineRegion(getFileSystem().getBucket()); + final S3AFileSystem fs = getFileSystem(); + regionName = determineRegion(fs.getBucket()); LOG.info("Determined region name to be [{}] for bucket [{}]", regionName, - getFileSystem().getBucket()); + fs.getBucket()); + endpoint = fs.getConf() + .get(Constants.ENDPOINT, Constants.CENTRAL_ENDPOINT); + LOG.info("Test endpoint is {}", endpoint); } @Test public void testCustomSignerAndInitializer() throws IOException, InterruptedException { + final Path basePath = path(getMethodName()); UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser("user1"); - FileSystem fs1 = runMkDirAndVerify(ugi1, "/customsignerpath1", "id1"); + FileSystem fs1 = runMkDirAndVerify(ugi1, + new Path(basePath, "customsignerpath1"), "id1"); UserGroupInformation ugi2 = UserGroupInformation.createRemoteUser("user2"); - FileSystem fs2 = runMkDirAndVerify(ugi2, "/customsignerpath2", "id2"); + FileSystem fs2 = runMkDirAndVerify(ugi2, + new Path(basePath, "customsignerpath2"), "id2"); Assertions.assertThat(CustomSignerInitializer.knownStores.size()) .as("Num registered stores mismatch").isEqualTo(2); @@ -91,20 +100,19 @@ public void testCustomSignerAndInitializer() } private FileSystem runMkDirAndVerify(UserGroupInformation ugi, - String pathString, String identifier) + Path finalPath, String identifier) throws IOException, InterruptedException { Configuration conf = createTestConfig(identifier); - Path path = new Path(pathString); - path = path.makeQualified(getFileSystem().getUri(), - getFileSystem().getWorkingDirectory()); - - Path finalPath = path; return ugi.doAs((PrivilegedExceptionAction) () -> { - int invocationCount = CustomSigner.invocationCount; + int instantiationCount = CustomSigner.getInstantiationCount(); + int invocationCount = CustomSigner.getInvocationCount(); FileSystem fs = finalPath.getFileSystem(conf); fs.mkdirs(finalPath); - Assertions.assertThat(CustomSigner.invocationCount) - .as("Invocation count lower than expected") + Assertions.assertThat(CustomSigner.getInstantiationCount()) + .as("CustomSigner Instantiation count lower than expected") + .isGreaterThan(instantiationCount); + Assertions.assertThat(CustomSigner.getInvocationCount()) + .as("CustomSigner Invocation count lower than expected") .isGreaterThan(invocationCount); Assertions.assertThat(CustomSigner.lastStoreValue) @@ -118,6 +126,12 @@ private FileSystem runMkDirAndVerify(UserGroupInformation ugi, }); } + /** + * Create a test conf with the custom signer; fixes up + * endpoint to be that of the test FS. + * @param identifier test key. + * @return a configuration for a filesystem. 
+ */ private Configuration createTestConfig(String identifier) { Configuration conf = createConfiguration(); @@ -128,24 +142,38 @@ private Configuration createTestConfig(String identifier) { conf.set(TEST_ID_KEY, identifier); conf.set(TEST_REGION_KEY, regionName); + conf.set(Constants.ENDPOINT, endpoint); + // make absolutely sure there is no caching. + disableFilesystemCaching(conf); return conf; } private String determineRegion(String bucketName) throws IOException { - String region = getFileSystem().getBucketLocation(bucketName); - return fixBucketRegion(region); + return getFileSystem().getBucketLocation(bucketName); } @Private public static final class CustomSigner implements Signer { - private static int invocationCount = 0; + + private static final AtomicInteger INSTANTIATION_COUNT = + new AtomicInteger(0); + private static final AtomicInteger INVOCATION_COUNT = + new AtomicInteger(0); + private static StoreValue lastStoreValue; + public CustomSigner() { + int c = INSTANTIATION_COUNT.incrementAndGet(); + LOG.info("Creating Signer #{}", c); + } + @Override public void sign(SignableRequest request, AWSCredentials credentials) { - invocationCount++; + int c = INVOCATION_COUNT.incrementAndGet(); + LOG.info("Signing request #{}", c); + String host = request.getEndpoint().getHost(); String bucketName = host.split("\\.")[0]; try { @@ -159,6 +187,14 @@ public void sign(SignableRequest request, AWSCredentials credentials) { realSigner.setRegionName(lastStoreValue.conf.get(TEST_REGION_KEY)); realSigner.sign(request, credentials); } + + public static int getInstantiationCount() { + return INSTANTIATION_COUNT.get(); + } + + public static int getInvocationCount() { + return INVOCATION_COUNT.get(); + } } @Private diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java index f5562bdf32e5e..26655de9d4417 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java @@ -43,8 +43,8 @@ import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.S3ClientFactory; import org.apache.hadoop.fs.s3a.Statistic; -import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher; import org.apache.hadoop.io.Text; @@ -72,7 +72,6 @@ import static org.apache.hadoop.fs.s3a.auth.delegation.MiniKerberizedHadoopCluster.ALICE; import static org.apache.hadoop.fs.s3a.auth.delegation.MiniKerberizedHadoopCluster.assertSecurityEnabled; import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.lookupS3ADelegationToken; -import static org.apache.hadoop.fs.s3a.impl.InternalConstants.AWS_SDK_METRICS_ENABLED; import static org.apache.hadoop.test.LambdaTestUtils.doAs; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.hamcrest.Matchers.containsString; @@ -557,23 +556,22 @@ public void testDelegationBindingMismatch2() throws Throwable { */ protected ObjectMetadata readLandsatMetadata(final S3AFileSystem delegatedFS) throws Exception { - 
AWSCredentialProviderList testing + AWSCredentialProviderList testingCreds = delegatedFS.shareCredentials("testing"); URI landsat = new URI(DEFAULT_CSVTEST_FILE); DefaultS3ClientFactory factory = new DefaultS3ClientFactory(); - Configuration conf = new Configuration(delegatedFS.getConf()); - conf.set(ENDPOINT, ""); - factory.setConf(conf); + factory.setConf(new Configuration(delegatedFS.getConf())); String host = landsat.getHost(); - StatisticsFromAwsSdk awsStats = null; - if (AWS_SDK_METRICS_ENABLED) { - awsStats = new EmptyS3AStatisticsContext() - .newStatisticsFromAwsSdk(); - } - AmazonS3 s3 = factory.createS3Client(landsat, host, testing, - "ITestSessionDelegationInFileystem", awsStats); + S3ClientFactory.S3ClientCreationParameters parameters = null; + parameters = new S3ClientFactory.S3ClientCreationParameters() + .withCredentialSet(testingCreds) + .withEndpoint(DEFAULT_ENDPOINT) + .withMetrics(new EmptyS3AStatisticsContext() + .newStatisticsFromAwsSdk()) + .withUserAgentSuffix("ITestSessionDelegationInFileystem"); + AmazonS3 s3 = factory.createS3Client(landsat, parameters); return Invoker.once("HEAD", host, () -> s3.getObjectMetadata(host, landsat.getPath().substring(1))); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index b025f6f0969fe..4d7f81d019b74 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -175,6 +175,7 @@ public void testCreateAbortEmptyFile() throws Throwable { Path destFile = methodPath(filename); Path pendingFilePath = makeMagic(destFile); touch(fs, pendingFilePath); + waitForConsistency(); validateIntermediateAndFinalPaths(pendingFilePath, destFile); Path pendingDataPath = validatePendingCommitData(filename, pendingFilePath); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java new file mode 100644 index 0000000000000..e7696996dbd1a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics; + +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.statistics.IOStatistics; + +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_ENDPOINT; +import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getLandsatCSVPath; +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_REQUEST; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; + +/** + * Verify that AWS SDK statistics are wired up. + * This test tries to read data from US-east-1 and us-west-2 buckets + * so as to be confident that the nuances of region mapping + * are handed correctly (HADOOP-13551). + * The statistics are probed to verify that the wiring up is complete. + */ +public class ITestAWSStatisticCollection extends AbstractS3ATestBase { + + private static final Path COMMON_CRAWL_PATH + = new Path("s3a://osm-pds/planet/planet-latest.orc"); + + @Test + public void testLandsatStatistics() throws Throwable { + final Configuration conf = getConfiguration(); + // skips the tests if the landsat path isn't the default. + Path path = getLandsatCSVPath(conf); + conf.set(ENDPOINT, DEFAULT_ENDPOINT); + conf.unset("fs.s3a.bucket.landsat-pds.endpoint"); + + try (S3AFileSystem fs = (S3AFileSystem) path.getFileSystem(conf)) { + fs.getObjectMetadata(path); + IOStatistics iostats = fs.getIOStatistics(); + assertThatStatisticCounter(iostats, + STORE_IO_REQUEST.getSymbol()) + .isGreaterThanOrEqualTo(1); + } + } + + @Test + public void testCommonCrawlStatistics() throws Throwable { + final Configuration conf = getConfiguration(); + // skips the tests if the landsat path isn't the default. + getLandsatCSVPath(conf); + + Path path = COMMON_CRAWL_PATH; + conf.set(ENDPOINT, DEFAULT_ENDPOINT); + + try (S3AFileSystem fs = (S3AFileSystem) path.getFileSystem(conf)) { + fs.getObjectMetadata(path); + IOStatistics iostats = fs.getIOStatistics(); + assertThatStatisticCounter(iostats, + STORE_IO_REQUEST.getSymbol()) + .isGreaterThanOrEqualTo(1); + } + } + +} From cfb5aff0cbdad3df67525d08108ff9c77263652e Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 24 Mar 2021 16:47:55 +0000 Subject: [PATCH 144/666] HADOOP-17476. ITestAssumeRole.testAssumeRoleBadInnerAuth failure. (#2777) Contributed by Steve Loughran. 
--- .../java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java index 814292c45d83b..771578001235e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java @@ -255,8 +255,7 @@ public void testAssumeRoleBadInnerAuth() throws Exception { conf.set(SECRET_KEY, "not secret"); expectFileSystemCreateFailure(conf, AWSBadRequestException.class, - "not a valid " + - "key=value pair (missing equal-sign) in Authorization header"); + "IncompleteSignature"); } @Test From eb9b8ab4ab462cdd60f48ebb24c1457c1a622cd2 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Wed, 24 Mar 2021 22:27:02 +0530 Subject: [PATCH 145/666] HADOOP-15566 initial changes for opentelemetry --- hadoop-common-project/hadoop-common/pom.xml | 25 ++++++ .../java/org/apache/hadoop/tracing/Span.java | 27 ++++++- .../apache/hadoop/tracing/SpanContext.java | 58 +++++++++++++- .../org/apache/hadoop/tracing/TraceScope.java | 14 +++- .../org/apache/hadoop/tracing/TraceUtils.java | 41 +++++++++- .../org/apache/hadoop/tracing/Tracer.java | 77 ++++++++++++++++--- hadoop-project/pom.xml | 2 +- 7 files changed, 227 insertions(+), 17 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 0a2232dae98fa..2fc4832fcb3e4 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -371,6 +371,31 @@ lz4-java provided + + io.opentelemetry + opentelemetry-api + 1.0.0 + + + io.opentelemetry + opentelemetry-sdk + 1.0.0 + + + io.opentelemetry + opentelemetry-sdk-extension-autoconfigure + 1.0.0-alpha + + + io.opentelemetry + opentelemetry-exporter-jaeger + 1.0.0 + + + io.grpc + grpc-netty-shaded + 1.26.0 + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java index 197b29fa3dfe4..d6ddf964ba693 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java @@ -17,28 +17,51 @@ */ package org.apache.hadoop.tracing; +import io.opentelemetry.context.Scope; + import java.io.Closeable; public class Span implements Closeable { - + io.opentelemetry.api.trace.Span span = null; public Span() { } + public Span(io.opentelemetry.api.trace.Span span){ + this.span = span; + } + public Span addKVAnnotation(String key, String value) { + if(span != null){ + span.setAttribute(key, value); + } return this; } public Span addTimelineAnnotation(String msg) { + if(span != null){ + span.addEvent(msg); + } return this; } public SpanContext getContext() { - return null; + return new SpanContext(span.getSpanContext()); } public void finish() { + close(); } public void close() { + if(span != null){ + span.end(); + } + } + + public Scope makeCurrent() { + if(span != null){ + return span.makeCurrent(); + } + return null; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java index 
363e94dc85dba..f4f6ff8da0453 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanContext.java @@ -18,15 +18,69 @@ package org.apache.hadoop.tracing; import java.io.Closeable; +import java.util.HashMap; +import java.util.Map; + +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; +import io.opentelemetry.api.trace.TraceStateBuilder; +import org.apache.hadoop.thirdparty.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Wrapper class for SpanContext to avoid using OpenTracing/OpenTelemetry * SpanContext class directly for better separation. */ -public class SpanContext implements Closeable { - public SpanContext() { +public class SpanContext implements Closeable { + public static final Logger LOG = LoggerFactory.getLogger(SpanContext.class.getName()); + private static final String TRACE_ID = "TRACE_ID"; + private static final String SPAN_ID = "SPAN_ID"; + private static final String TRACE_FLAGS = "TRACE_FLAGS"; + + + private io.opentelemetry.api.trace.SpanContext spanContext = null; + public SpanContext(io.opentelemetry.api.trace.SpanContext spanContext) { + this.spanContext = spanContext; } public void close() { + + } + + public Map getKVSpanContext(){ + if(spanContext != null){ + //TODO: may we should move this to Proto + Map kvMap = new HashMap<>(); + kvMap.put(TRACE_ID, spanContext.getTraceId()); + kvMap.put(SPAN_ID, spanContext.getSpanId()); + kvMap.put(TRACE_FLAGS, spanContext.getTraceFlags().asHex()); + kvMap.putAll(spanContext.getTraceState().asMap()); + return kvMap; + } + return null; + } + + static SpanContext buildFromKVMap(Map kvMap){ + String traceId = kvMap.get(TRACE_ID); + kvMap.remove(TRACE_ID); + String spanId = kvMap.get(SPAN_ID); + kvMap.remove(SPAN_ID); + String traceFlagsHex = kvMap.get(TRACE_FLAGS); + kvMap.remove(TRACE_FLAGS); + TraceFlags traceFlags = TraceFlags.fromHex(traceFlagsHex, 0); + TraceStateBuilder traceStateBuilder = TraceState.builder(); + for(Map.Entry keyValue: kvMap.entrySet()){ + traceStateBuilder.put(keyValue.getKey(), keyValue.getValue()); + } + TraceState traceState = traceStateBuilder.build(); + io.opentelemetry.api.trace.SpanContext spanContext = io.opentelemetry.api.trace.SpanContext.createFromRemoteParent(traceId, spanId, traceFlags, traceState ); + + return new SpanContext(spanContext); + } + + public io.opentelemetry.api.trace.SpanContext getSpanContext() { + return spanContext; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java index 2abf9cb7ec2bf..ff4e104d1fee0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceScope.java @@ -17,23 +17,32 @@ */ package org.apache.hadoop.tracing; +import io.opentelemetry.context.Scope; + import java.io.Closeable; public class TraceScope implements Closeable { Span span; - + Scope scope; public TraceScope(Span span) { this.span = span; + if(span != null){ + this.scope = span.makeCurrent(); + } } // Add tag to the span public void addKVAnnotation(String key, String value) { + span.addKVAnnotation(key, value); } public void addKVAnnotation(String key, Number value) { + 
span.addKVAnnotation(key, value.toString()); } + public void addTimelineAnnotation(String msg) { + span.addTimelineAnnotation(msg); } public Span span() { @@ -51,6 +60,9 @@ public void detach() { } public void close() { + if(scope != null){ + scope.close(); + } if (span != null) { span.close(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java index b218493780ee1..7ba73e461a916 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java @@ -20,12 +20,19 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.thirdparty.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; +import java.util.HashMap; +import java.util.Map; /** * This class provides utility functions for tracing. */ @InterfaceAudience.Private public class TraceUtils { + public static final Logger LOG = LoggerFactory.getLogger(TraceUtils.class.getName()); static final String DEFAULT_HADOOP_TRACE_PREFIX = "hadoop.htrace."; public static TraceConfiguration wrapHadoopConf(final String prefix, @@ -38,10 +45,40 @@ public static Tracer createAndRegisterTracer(String name) { } public static SpanContext byteStringToSpanContext(ByteString byteString) { - return null; + return deserialize(byteString); } public static ByteString spanContextToByteString(SpanContext context) { - return null; + Map kvMap = context.getKVSpanContext(); + ByteString byteString = serialize(kvMap); + return byteString; + } + + //Added this for tracing will remove this after having + // a discussion + static ByteString serialize(Object obj){ + try{ + ByteArrayOutputStream out = new ByteArrayOutputStream(); + ObjectOutputStream os = new ObjectOutputStream(out); + os.writeObject(obj); + os.flush(); + byte[] byteArray = out.toByteArray(); + return ByteString.copyFrom(byteArray); + } catch (Exception e){ + LOG.error("Error in searializing the object:", e); + return null; + } + } + + static SpanContext deserialize(ByteString spanContextByteString) { + try { + ByteArrayInputStream in = new ByteArrayInputStream(spanContextByteString.toByteArray()); + ObjectInputStream is = new ObjectInputStream(in); + Map kvMap = (Map) is.readObject(); + return SpanContext.buildFromKVMap(kvMap); + } catch (Exception e) { + LOG.error("Error in deserializing the object:", e); + return null; + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index a99b004b542f4..d24c9175bf65b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -17,20 +17,39 @@ */ package org.apache.hadoop.tracing; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.context.Context; +import io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; 
+import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.TimeUnit; + /** * No-Op Tracer (for now) to remove HTrace without changing too many files. */ public class Tracer { + public static final Logger LOG = LoggerFactory.getLogger(Tracer.class.getName()); // Singleton - private static final Tracer globalTracer = null; + private static Tracer globalTracer = null; + io.opentelemetry.api.trace.Tracer OTelTracer; private final NullTraceScope nullTraceScope; private final String name; public final static String SPAN_RECEIVER_CLASSES_KEY = "span.receiver.classes"; - public Tracer(String name) { + private Tracer(String name, io.opentelemetry.api.trace.Tracer tracer) { this.name = name; + OTelTracer = tracer; nullTraceScope = NullTraceScope.INSTANCE; } @@ -45,28 +64,32 @@ public static Tracer curThreadTracer() { * @return org.apache.hadoop.tracing.Span */ public static Span getCurrentSpan() { - return null; + io.opentelemetry.api.trace.Span span = io.opentelemetry.api.trace.Span.current(); + return span.getSpanContext().isValid()? new Span(span): null; } public TraceScope newScope(String description) { - return nullTraceScope; + Span span = new Span(OTelTracer.spanBuilder(description).startSpan()); + return new TraceScope(span); } public Span newSpan(String description, SpanContext spanCtx) { - return new Span(); + io.opentelemetry.api.trace.Span parentSpan = io.opentelemetry.api.trace.Span.wrap(spanCtx.getSpanContext()); + io.opentelemetry.api.trace.Span span = OTelTracer.spanBuilder(description).setParent(Context.current().with(parentSpan)).startSpan(); + return new Span(span); } public TraceScope newScope(String description, SpanContext spanCtx) { - return nullTraceScope; + return new TraceScope(newSpan(description, spanCtx)); } public TraceScope newScope(String description, SpanContext spanCtx, boolean finishSpanOnClose) { - return nullTraceScope; + return new TraceScope(newSpan(description, spanCtx)); } public TraceScope activateSpan(Span span) { - return nullTraceScope; + return new TraceScope(span); } public void close() { @@ -88,9 +111,45 @@ public Builder conf(TraceConfiguration conf) { return this; } + OpenTelemetry initialiseJaegerExporter(String jaegerHost, int jaegerPort, String name) { + ManagedChannel jaegerChannel = + ManagedChannelBuilder.forAddress(jaegerHost, jaegerPort).usePlaintext().build(); + // Export traces to Jaeger + JaegerGrpcSpanExporter jaegerExporter = + JaegerGrpcSpanExporter.builder() + .setChannel(jaegerChannel) + .setTimeout(30, TimeUnit.SECONDS) + .build(); + Resource serviceNameResource = + Resource.create(Attributes.of(AttributeKey.stringKey("service.name"), name)); + // Set to process the spans by the Jaeger Exporter + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(jaegerExporter)) + .setResource(Resource.getDefault().merge(serviceNameResource)) + .build(); + OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build(); + + // it's always a good idea to shut down the SDK cleanly at JVM exit. 
+ Runtime.getRuntime().addShutdownHook(new Thread(tracerProvider::close)); + + return openTelemetry; + } + + OpenTelemetry noOpTracer(){ + return OpenTelemetry.noop(); + } + public Tracer build() { if (globalTracer == null) { - globalTracer = new Tracer(name); + //jaeger tracing changes + OpenTelemetry openTelemetry = initialiseJaegerExporter("localhost", 14250, name); + io.opentelemetry.api.trace.Tracer tracer = openTelemetry.getTracer(name); + globalTracer = new Tracer(name, tracer); + + //globalTracer = new Tracer(name, noOpTracer().getTracer(name)); + Tracer.globalTracer = globalTracer; } return globalTracer; } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 90a7c4420f8e5..802352f7103d0 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -86,7 +86,7 @@ 1.1 - 2.5.0 + 3.7.1 3.7.1 ${env.HADOOP_PROTOC_PATH} From 22a0e5b84ebe19a14ad0a3efdd45a87f494e38ba Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Wed, 24 Mar 2021 23:22:33 +0530 Subject: [PATCH 146/666] HDFS-15918. Replace deprecated RAND_pseudo_bytes (#2811) --- .../libhdfspp/lib/common/sasl_authenticator.h | 2 +- .../libhdfspp/lib/common/sasl_digest_md5.cc | 21 ++++++++++++++----- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_authenticator.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_authenticator.h index 78b2a557449ff..f3b190fea85ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_authenticator.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_authenticator.h @@ -49,7 +49,7 @@ class DigestMD5Authenticator { static size_t NextToken(const std::string &payload, size_t off, std::string *tok); - void GenerateCNonce(); + Status GenerateCNonce(); std::string username_; std::string password_; std::string nonce_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_digest_md5.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_digest_md5.cc index 3ca85786d15f5..cdb25d7e1a097 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_digest_md5.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/sasl_digest_md5.cc @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -91,12 +92,19 @@ size_t DigestMD5Authenticator::NextToken(const std::string &payload, size_t off, return off; } -void DigestMD5Authenticator::GenerateCNonce() { - if (!TEST_mock_cnonce_) { - char buf[8] = {0,}; - RAND_pseudo_bytes(reinterpret_cast(buf), sizeof(buf)); +Status DigestMD5Authenticator::GenerateCNonce() { + if (TEST_mock_cnonce_) { + return Status::OK(); + } + + char buf[8] = { 0, }; + if (RAND_bytes(reinterpret_cast(buf), sizeof(buf)) == 1) { cnonce_ = Base64Encode(std::string(buf, sizeof(buf))); + return Status::OK(); } + + const auto* error = ERR_reason_error_string(ERR_get_error()); + return Status::Error(error); } Status DigestMD5Authenticator::ParseFirstChallenge(const std::string &payload) { @@ -155,8 +163,11 @@ Status DigestMD5Authenticator::GenerateFirstResponse(std::string *result) { return Status::Unimplemented(); } + if (auto status = GenerateCNonce(); !status.ok()) { + return status; + } + std::stringstream ss; - GenerateCNonce(); ss << "charset=utf-8,username=\"" << QuoteString(username_) << 
"\"" << ",authzid=\"" << QuoteString(username_) << "\"" << ",nonce=\"" << QuoteString(nonce_) << "\"" From aac7979825bbccdd16a0c04157eb4500f5d05de0 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Thu, 25 Mar 2021 12:24:14 +0800 Subject: [PATCH 147/666] HDFS-15919. BlockPoolManager should log stack trace if unable to get Namenode addresses. Contributed by Stephen O'Donnell. --- .../apache/hadoop/hdfs/server/datanode/BlockPoolManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index 390577f8b54bc..95bc2d6c23856 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -158,7 +158,7 @@ void refreshNamenodes(Configuration conf) newLifelineAddressMap = DFSUtil.getNNLifelineRpcAddressesForCluster(conf); } catch (IOException ioe) { - LOG.warn("Unable to get NameNode addresses."); + LOG.warn("Unable to get NameNode addresses.", ioe); } if (newAddressMap == null || newAddressMap.isEmpty()) { From 28f87d961e34a5cae7bf954ce894e3784a42e6c9 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 25 Mar 2021 19:53:30 +0900 Subject: [PATCH 148/666] HADOOP-17602. Upgrade JUnit to 4.13.1. Contributed by Ahmed Hussein. --- hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml | 2 +- hadoop-project/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml index d5fe11e9e3f51..963fa37426cc6 100755 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml @@ -116,7 +116,7 @@ junit junit - 4.12 + ${junit.version} test diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 802352f7103d0..1a549c8550729 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -199,7 +199,7 @@ 1.26 1.4.8 2.0.2 - 4.12 + 4.13.1 5.5.1 5.5.1 1.5.1 From 23c0a6f24cfc668505ff518ef613bb5f73b7f437 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 26 Mar 2021 04:09:43 +0900 Subject: [PATCH 149/666] HADOOP-17133. 
Implement HttpServer2 metrics (#2145) --- .../fs/CommonConfigurationKeysPublic.java | 9 + .../org/apache/hadoop/http/HttpServer2.java | 45 +++++ .../hadoop/http/HttpServer2Metrics.java | 164 ++++++++++++++++++ .../src/main/resources/core-default.xml | 9 + .../apache/hadoop/http/TestHttpServer.java | 39 +++++ .../fs/http/server/HttpFSServerWebServer.java | 3 + 6 files changed, 269 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2Metrics.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 3b31449decb5c..20bb0350d191b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -949,6 +949,15 @@ public class CommonConfigurationKeysPublic { /** Defalt value for HADOOP_HTTP_LOGS_ENABLED */ public static final boolean HADOOP_HTTP_LOGS_ENABLED_DEFAULT = true; + /** + * @see + * + * core-default.xml + */ + public static final String HADOOP_HTTP_METRICS_ENABLED = + "hadoop.http.metrics.enabled"; + public static final boolean HADOOP_HTTP_METRICS_ENABLED_DEFAULT = true; + /** * @see * diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index cdc2a74133af2..7534cba45ee13 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -50,6 +50,7 @@ import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @@ -93,6 +94,7 @@ import org.eclipse.jetty.server.handler.ContextHandlerCollection; import org.eclipse.jetty.server.handler.HandlerCollection; import org.eclipse.jetty.server.handler.RequestLogHandler; +import org.eclipse.jetty.server.handler.StatisticsHandler; import org.eclipse.jetty.server.session.SessionHandler; import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.servlet.FilterMapping; @@ -201,6 +203,9 @@ public final class HttpServer2 implements FilterContainer { protected static final String PROMETHEUS_SINK = "PROMETHEUS_SINK"; private PrometheusMetricsSink prometheusMetricsSink; + private StatisticsHandler statsHandler; + private HttpServer2Metrics metrics; + /** * Class to construct instances of HTTP server with specific options. */ @@ -669,6 +674,27 @@ private void initializeWebServer(String name, String hostName, addDefaultApps(contexts, appDir, conf); webServer.setHandler(handlers); + if (conf.getBoolean( + CommonConfigurationKeysPublic.HADOOP_HTTP_METRICS_ENABLED, + CommonConfigurationKeysPublic.HADOOP_HTTP_METRICS_ENABLED_DEFAULT)) { + // Jetty StatisticsHandler must be inserted as the first handler. 
+ // The tree might look like this: + // + // - StatisticsHandler (for all requests) + // - HandlerList + // - ContextHandlerCollection + // - RequestLogHandler (if enabled) + // - WebAppContext + // - SessionHandler + // - Servlets + // - Filters + // - etc.. + // + // Reference: https://www.eclipse.org/lists/jetty-users/msg06273.html + statsHandler = new StatisticsHandler(); + webServer.insertHandler(statsHandler); + } + Map xFrameParams = setHeaders(conf); addGlobalFilter("safety", QuotingInputFilter.class.getName(), xFrameParams); final FilterInitializer[] initializers = getFilterInitializers(conf); @@ -1227,6 +1253,16 @@ public void start() throws IOException { .register("prometheus", "Hadoop metrics prometheus exporter", prometheusMetricsSink); } + if (statsHandler != null) { + // Create metrics source for each HttpServer2 instance. + // Use port number to make the metrics source name unique. + int port = -1; + for (ServerConnector connector : listeners) { + port = connector.getLocalPort(); + break; + } + metrics = HttpServer2Metrics.create(statsHandler, port); + } } catch (IOException ex) { LOG.info("HttpServer.start() threw a non Bind IOException", ex); throw ex; @@ -1409,6 +1445,9 @@ public void stop() throws Exception { try { webServer.stop(); + if (metrics != null) { + metrics.remove(); + } } catch (Exception e) { LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); @@ -1789,4 +1828,10 @@ private Map getDefaultHeaders() { splitVal[1]); return headers; } + + @VisibleForTesting + HttpServer2Metrics getMetrics() { + return metrics; + } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2Metrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2Metrics.java new file mode 100644 index 0000000000000..7a74e7be3f74d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2Metrics.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import org.eclipse.jetty.server.handler.StatisticsHandler; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; + +/** + * This class collects all the metrics of Jetty's StatisticsHandler + * and expose them as Hadoop Metrics. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +@Metrics(name="HttpServer2", about="HttpServer2 metrics", context="http") +public class HttpServer2Metrics { + + private final StatisticsHandler handler; + private final int port; + + @Metric("number of requested that have been asynchronously dispatched") + public int asyncDispatches() { + return handler.getAsyncDispatches(); + } + @Metric("total number of async requests") + public int asyncRequests() { + return handler.getAsyncRequests(); + } + @Metric("currently waiting async requests") + public int asyncRequestsWaiting() { + return handler.getAsyncRequestsWaiting(); + } + @Metric("maximum number of waiting async requests") + public int asyncRequestsWaitingMax() { + return handler.getAsyncRequestsWaitingMax(); + } + @Metric("number of dispatches") + public int dispatched() { + return handler.getDispatched(); + } + @Metric("number of dispatches currently active") + public int dispatchedActive() { + return handler.getDispatchedActive(); + } + @Metric("maximum number of active dispatches being handled") + public int dispatchedActiveMax() { + return handler.getDispatchedActiveMax(); + } + @Metric("maximum time spend in dispatch handling (in ms)") + public long dispatchedTimeMax() { + return handler.getDispatchedTimeMax(); + } + @Metric("mean time spent in dispatch handling (in ms)") + public double dispatchedTimeMean() { + return handler.getDispatchedTimeMean(); + } + @Metric("standard deviation for dispatch handling (in ms)") + public double dispatchedTimeStdDev() { + return handler.getDispatchedTimeStdDev(); + } + @Metric("total time spent in dispatch handling (in ms)") + public long dispatchedTimeTotal() { + return handler.getDispatchedTimeTotal(); + } + @Metric("number of async requests requests that have expired") + public int expires() { + return handler.getExpires(); + } + @Metric("number of requests") + public int requests() { + return handler.getRequests(); + } + @Metric("number of requests currently active") + public int requestsActive() { + return handler.getRequestsActive(); + } + @Metric("maximum number of active requests") + public int requestsActiveMax() { + return handler.getRequestsActiveMax(); + } + @Metric("maximum time spend handling requests (in ms)") + public long requestTimeMax() { + return handler.getRequestTimeMax(); + } + @Metric("mean time spent handling requests (in ms)") + public double requestTimeMean() { + return handler.getRequestTimeMean(); + } + @Metric("standard deviation for request handling (in ms)") + public double requestTimeStdDev() { + return handler.getRequestTimeStdDev(); + } + @Metric("total time spend in all request handling (in ms)") + public long requestTimeTotal() { + return handler.getRequestTimeTotal(); + } + @Metric("number of requests with 1xx response status") + public int responses1xx() { + return handler.getResponses1xx(); + } + @Metric("number of requests with 2xx response status") + public int responses2xx() { + return handler.getResponses2xx(); + } + @Metric("number of requests with 3xx response status") + public int responses3xx() { + return handler.getResponses3xx(); + } + @Metric("number of requests with 4xx response status") + public int responses4xx() { + return handler.getResponses4xx(); + } + @Metric("number of requests with 5xx response status") + public int responses5xx() { + return handler.getResponses5xx(); + } + @Metric("total number of bytes across all responses") + public long responsesBytesTotal() { + return handler.getResponsesBytesTotal(); + } + 
@Metric("time in milliseconds stats have been collected for") + public long statsOnMs() { + return handler.getStatsOnMs(); + } + + HttpServer2Metrics(StatisticsHandler handler, int port) { + this.handler = handler; + this.port = port; + } + + static HttpServer2Metrics create(StatisticsHandler handler, int port) { + final MetricsSystem ms = DefaultMetricsSystem.instance(); + final HttpServer2Metrics metrics = new HttpServer2Metrics(handler, port); + // Remove the old metrics from metrics system to avoid duplicate error + // when HttpServer2 is started twice. + metrics.remove(); + // Add port number to the suffix to allow multiple instances in a host. + return ms.register("HttpServer2-" + port, "HttpServer2 metrics", metrics); + } + + void remove() { + DefaultMetricsSystem.removeSourceName("HttpServer2-" + port); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 11b790408b79d..4794bb2764c80 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -56,6 +56,15 @@ + + hadoop.http.metrics.enabled + true + + If true, set Jetty's StatisticsHandler to HTTP server to collect + HTTP layer metrics and register them to Hadoop metrics system. + + + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index ad9617dca79de..e3cb028f5f553 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.HttpServer2.QuotingInputFilter.RequestQuoter; import org.apache.hadoop.http.resource.JerseyResource; import org.apache.hadoop.net.NetUtils; @@ -29,7 +30,10 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.test.Whitebox; + +import org.assertj.core.api.Assertions; import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.handler.StatisticsHandler; import org.eclipse.jetty.util.ajax.JSON; import org.junit.AfterClass; import org.junit.Assert; @@ -148,6 +152,8 @@ public void doGet(HttpServletRequest request, @BeforeClass public static void setup() throws Exception { Configuration conf = new Configuration(); conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS); + conf.setBoolean( + CommonConfigurationKeysPublic.HADOOP_HTTP_METRICS_ENABLED, true); server = createTestServer(conf); server.addServlet("echo", "/echo", EchoServlet.class); server.addServlet("echomap", "/echomap", EchoMapServlet.class); @@ -272,6 +278,39 @@ public void testAcceptorSelectorConfigurability() throws Exception { conn.getContentType()); } + @Test + public void testHttpServer2Metrics() throws Exception { + final HttpServer2Metrics metrics = server.getMetrics(); + final int before = metrics.responses2xx(); + final URL servletUrl = new URL(baseUrl, "/echo?echo"); + final HttpURLConnection conn = + (HttpURLConnection)servletUrl.openConnection(); + 
conn.connect(); + Assertions.assertThat(conn.getResponseCode()).isEqualTo(200); + final int after = metrics.responses2xx(); + Assertions.assertThat(after).isGreaterThan(before); + } + + /** + * Jetty StatisticsHandler must be inserted via Server#insertHandler + * instead of Server#setHandler. The server fails to start if + * the handler is added by setHandler. + */ + @Test + public void testSetStatisticsHandler() throws Exception { + final Configuration conf = new Configuration(); + // skip insert + conf.setBoolean( + CommonConfigurationKeysPublic.HADOOP_HTTP_METRICS_ENABLED, false); + final HttpServer2 testServer = createTestServer(conf); + testServer.webServer.setHandler(new StatisticsHandler()); + try { + testServer.start(); + fail("IOException should be thrown."); + } catch (IOException ignore) { + } + } + @Test public void testHttpResonseContainsXFrameOptions() throws Exception { validateXFrameOption(HttpServer2.XFrameOption.SAMEORIGIN); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java index 24a30f4db0efb..a59d899ae3981 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.apache.hadoop.security.authorize.AccessControlList; @@ -150,6 +151,7 @@ private static void deprecateEnv(String varName, Configuration conf, } public void start() throws IOException { + DefaultMetricsSystem.initialize("httpfs"); httpServer.start(); } @@ -159,6 +161,7 @@ public void join() throws InterruptedException { public void stop() throws Exception { httpServer.stop(); + DefaultMetricsSystem.shutdown(); } public URL getUrl() { From 745190cf007047ad473593e15c55fd86cd2a926f Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Thu, 25 Mar 2021 22:33:58 +0000 Subject: [PATCH 150/666] YARN-10713. ClusterMetrics should support custom resource capacity related metrics. Contributed by Qi Zhu. 
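For illustration only (not part of the committed change): once this patch is applied, the per-resource capability tracked by ClusterMetrics can be probed roughly as sketched below. The helper class name is hypothetical, ResourceInformation.GPU_URI is used as the example resource, and the capability map is assumed to hold numeric values keyed by resource name, matching the custom-resource queue metrics convention.

    import org.apache.hadoop.yarn.api.records.ResourceInformation;
    import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;

    /** Illustrative helper (hypothetical, not part of the patch). */
    public final class GpuCapabilityProbe {
      private GpuCapabilityProbe() {
      }

      /** Reads the aggregated GPU capability, or 0 if no node registered GPUs. */
      public static long clusterGpuCapability() {
        // getCustomResourceCapability() is introduced by this patch; the map is
        // assumed to hold numeric values keyed by resource name (e.g. yarn.io/gpu).
        Number gpu = ClusterMetrics.getMetrics()
            .getCustomResourceCapability()
            .get(ResourceInformation.GPU_URI);
        return gpu == null ? 0L : gpu.longValue();
      }
    }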
--- .../resourcemanager/ClusterMetrics.java | 50 +++++++++++++------ .../TestCSAllocateCustomResource.java | 6 ++- 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java index 7fe5cc9703b1b..95ef7a6a69f2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.metrics2.lib.Interns.info; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.classification.InterfaceAudience; @@ -35,6 +36,9 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; +import org.apache.hadoop.yarn.metrics.CustomResourceMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsForCustomResources; import org.apache.hadoop.yarn.util.resource.ResourceUtils; @InterfaceAudience.Private @@ -58,10 +62,19 @@ public class ClusterMetrics { @Metric("Vcore Utilization") MutableGaugeLong utilizedVirtualCores; @Metric("Memory Capability") MutableGaugeLong capabilityMB; @Metric("Vcore Capability") MutableGaugeLong capabilityVirtualCores; - @Metric("GPU Capability") MutableGaugeLong capabilityGPUs; private static final MetricsInfo RECORD_INFO = info("ClusterMetrics", "Metrics for the Yarn Cluster"); + + private static final String CUSTOM_RESOURCE_CAPABILITY_METRIC_PREFIX = + "Capability."; + private static final String CUSTOM_RESOURCE_CAPABILITY_METRIC_DESC = + "NAME Capability"; + + private static CustomResourceMetrics customResourceMetrics; + + private final CustomResourceMetricValue customResourceCapability = + new CustomResourceMetricValue(); private static volatile ClusterMetrics INSTANCE = null; private static MetricsRegistry registry; @@ -86,6 +99,17 @@ private static void registerMetrics() { if (ms != null) { ms.register("ClusterMetrics", "Metrics for the Yarn Cluster", INSTANCE); } + + if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { + customResourceMetrics = + new CustomResourceMetrics(); + Map customResources = + customResourceMetrics.initAndGetCustomResources(); + customResourceMetrics. 
+ registerCustomResources(customResources, + registry, CUSTOM_RESOURCE_CAPABILITY_METRIC_PREFIX, + CUSTOM_RESOURCE_CAPABILITY_METRIC_DESC); + } } @VisibleForTesting @@ -209,23 +233,20 @@ public long getCapabilityVirtualCores() { return capabilityVirtualCores.value(); } - public long getCapabilityGPUs() { - if (capabilityGPUs == null) { - return 0; - } + public Map getCustomResourceCapability() { + return customResourceCapability.getValues(); + } - return capabilityGPUs.value(); + public void setCustomResourceCapability(Resource res) { + this.customResourceCapability.set(res); } public void incrCapability(Resource res) { if (res != null) { capabilityMB.incr(res.getMemorySize()); capabilityVirtualCores.incr(res.getVirtualCores()); - Integer gpuIndex = ResourceUtils.getResourceTypeIndex() - .get(ResourceInformation.GPU_URI); - if (gpuIndex != null) { - capabilityGPUs.incr(res. - getResourceValue(ResourceInformation.GPU_URI)); + if (customResourceCapability != null) { + customResourceCapability.increase(res); } } } @@ -234,11 +255,8 @@ public void decrCapability(Resource res) { if (res != null) { capabilityMB.decr(res.getMemorySize()); capabilityVirtualCores.decr(res.getVirtualCores()); - Integer gpuIndex = ResourceUtils.getResourceTypeIndex() - .get(ResourceInformation.GPU_URI); - if (gpuIndex != null) { - capabilityGPUs.decr(res. - getResourceValue(ResourceInformation.GPU_URI)); + if (customResourceCapability != null) { + customResourceCapability.decrease(res); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java index d6f15446441d6..7b0254cdcc5da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java @@ -231,7 +231,8 @@ public void testClusterMetricsWithGPU() assertEquals("Cluster Capability Vcores incorrect", metrics.getCapabilityVirtualCores(), 4 * 8); assertEquals("Cluster Capability GPUs incorrect", - metrics.getCapabilityGPUs(), 4 * 8); + (metrics.getCustomResourceCapability() + .get(GPU_URI)).longValue(), 4 * 8); for (RMNode rmNode : rmNodes) { nodeTracker.removeNode(rmNode.getNodeID()); @@ -243,7 +244,8 @@ public void testClusterMetricsWithGPU() assertEquals("Cluster Capability Vcores incorrect", metrics.getCapabilityVirtualCores(), 0); assertEquals("Cluster Capability GPUs incorrect", - metrics.getCapabilityGPUs(), 0); + (metrics.getCustomResourceCapability() + .get(GPU_URI)).longValue(), 0); ClusterMetrics.destroy(); } } From a947edb14df2a2000ba954a7085b9a8889036fa5 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Fri, 26 Mar 2021 21:56:35 +0530 Subject: [PATCH 151/666] HDFS-15922. 
Use memcpy for copying non-null terminated string in jni_helper.c (#2818) --- .../src/main/native/libhdfs/jni_helper.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c index 4efb3b61b4394..bbbc8b4602b54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/jni_helper.c @@ -422,9 +422,9 @@ static ssize_t wildcard_expandPath(const char* path, char* expanded) if (expanded != NULL) { // pathLength includes an extra '.' - strncpy(dest, path, pathLength-1); + memcpy(dest, path, pathLength - 1); dest += pathLength - 1; - strncpy(dest, filename, filenameLength); + memcpy(dest, filename, filenameLength); dest += filenameLength; *dest = PATH_SEPARATOR; dest++; @@ -536,7 +536,7 @@ static ssize_t getClassPath_helper(const char *classpath, char* expandedClasspat // +1 for path separator or null terminator length += tokenlen + 1; if (expandedCP_curr != NULL) { - strncpy(expandedCP_curr, cp_token, tokenlen); + memcpy(expandedCP_curr, cp_token, tokenlen); expandedCP_curr += tokenlen; *expandedCP_curr = PATH_SEPARATOR; expandedCP_curr++; From c6ed484f38a839e5d924026704aba97d206345eb Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sat, 27 Mar 2021 03:01:41 +0530 Subject: [PATCH 152/666] HADOOP-17531.Addendum: DistCp: Reduce memory usage on copying huge directories. (#2820). Contributed by Ayush Saxena. Signed-off-by: Steve Loughran --- .../test/java/org/apache/hadoop/test/GenericTestUtils.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 33ebd8695b263..8b1750def78b8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -30,6 +30,7 @@ import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import java.lang.reflect.InvocationTargetException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -70,7 +71,6 @@ import org.mockito.stubbing.Answer; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @@ -927,7 +927,7 @@ private static CompletableFuture put(FileSystem fs, return submit(EXECUTOR, () -> { try (DurationInfo ignore = new DurationInfo(LOG, false, "Creating %s", path)) { - createFile(fs, path, true, text.getBytes(Charsets.UTF_8)); + createFile(fs, path, true, text.getBytes(StandardCharsets.UTF_8)); return path; } }); From 789b939e5f656d32289d1a8cfb282860db159990 Mon Sep 17 00:00:00 2001 From: litao Date: Sat, 27 Mar 2021 16:57:09 +0800 Subject: [PATCH 153/666] HDFS-15879. 
Exclude slow nodes when choose targets for blocks (#2748) Reviewed-by: Dinesh Chitlangia Reviewed-by: Takanobu Asanuma --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 15 ++ .../BlockPlacementPolicyDefault.java | 22 ++- .../blockmanagement/DatanodeManager.java | 109 ++++++++++++++- .../blockmanagement/SlowPeerTracker.java | 18 +++ .../src/main/resources/hdfs-default.xml | 30 ++++ ...TestReplicationPolicyExcludeSlowNodes.java | 131 ++++++++++++++++++ 6 files changed, 322 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b2c94544811d4..23897a502f108 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1023,6 +1023,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.datanode.outliers.report.interval"; public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT = "30m"; + public static final String DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY = + "dfs.namenode.max.slowpeer.collect.nodes"; + public static final int DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT = + 5; + public static final String DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY = + "dfs.namenode.slowpeer.collect.interval"; + public static final String DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_DEFAULT = + "30m"; // property for fsimage compression public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress"; @@ -1176,6 +1184,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_BLOCKPLACEMENTPOLICY_DEFAULT_PREFER_LOCAL_NODE_KEY = "dfs.namenode.block-placement-policy.default.prefer-local-node"; public static final boolean DFS_NAMENODE_BLOCKPLACEMENTPOLICY_DEFAULT_PREFER_LOCAL_NODE_DEFAULT = true; + public static final String + DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY = + "dfs.namenode.block-placement-policy.exclude-slow-nodes.enabled"; + public static final boolean + DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT = + false; + public static final String DFS_NAMENODE_GC_TIME_MONITOR_ENABLE = "dfs.namenode.gc.time.monitor.enable"; public static final boolean DFS_NAMENODE_GC_TIME_MONITOR_ENABLE_DEFAULT = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index df687f4cde085..9f68c36033cfc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYSTORAGETYPE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYSTORAGETYPE_KEY; import static org.apache.hadoop.util.Time.monotonicNow; @@ -82,7 +84,8 @@ private enum NodeNotChosenReason { NODE_TOO_BUSY("the node is too busy"), TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"), NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"), - NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable"); + NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable"), + NODE_SLOW("the node is too slow"); private final String text; @@ -99,6 +102,8 @@ private String getText() { private boolean considerLoadByStorageType; protected double considerLoadFactor; private boolean preferLocalNode; + private boolean dataNodePeerStatsEnabled; + private boolean excludeSlowNodesEnabled; protected NetworkTopology clusterMap; protected Host2NodesMap host2datanodeMap; private FSClusterStats stats; @@ -144,6 +149,12 @@ public void initialize(Configuration conf, FSClusterStats stats, DFS_NAMENODE_BLOCKPLACEMENTPOLICY_DEFAULT_PREFER_LOCAL_NODE_KEY, DFSConfigKeys. DFS_NAMENODE_BLOCKPLACEMENTPOLICY_DEFAULT_PREFER_LOCAL_NODE_DEFAULT); + this.dataNodePeerStatsEnabled = conf.getBoolean( + DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY, + DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT); + this.excludeSlowNodesEnabled = conf.getBoolean( + DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, + DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT); } @Override @@ -1091,6 +1102,15 @@ boolean isGoodDatanode(DatanodeDescriptor node, return false; } + // check if the target is a slow node + if (dataNodePeerStatsEnabled && excludeSlowNodesEnabled) { + Set nodes = DatanodeManager.getSlowNodes(); + if (nodes.contains(node)) { + logNodeIsNotChosen(node, NodeNotChosenReason.NODE_SLOW); + return false; + } + } + return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 454e4846f38fc..6939e65ba80a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -18,8 +18,12 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT; import static org.apache.hadoop.util.Time.monotonicNow; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; @@ -53,6 +57,7 @@ import org.apache.hadoop.net.*; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.security.token.Token; +import 
org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Timer; @@ -201,8 +206,16 @@ public class DatanodeManager { */ private final boolean useDfsNetworkTopology; + private static final String IP_PORT_SEPARATOR = ":"; + @Nullable private final SlowPeerTracker slowPeerTracker; + private static Set slowNodesSet = Sets.newConcurrentHashSet(); + private Daemon slowPeerCollectorDaemon; + private final long slowPeerCollectionInterval; + private final int maxSlowPeerReportNodes; + private boolean excludeSlowNodesEnabled; + @Nullable private final SlowDiskTracker slowDiskTracker; @@ -242,11 +255,22 @@ public class DatanodeManager { DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, DFSConfigKeys. DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT)); - final Timer timer = new Timer(); this.slowPeerTracker = dataNodePeerStatsEnabled ? new SlowPeerTracker(conf, timer) : null; - + this.excludeSlowNodesEnabled = conf.getBoolean( + DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, + DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT); + this.maxSlowPeerReportNodes = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT); + this.slowPeerCollectionInterval = conf.getTimeDuration( + DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY, + DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + if (slowPeerTracker != null && excludeSlowNodesEnabled) { + startSlowPeerCollector(); + } this.slowDiskTracker = dataNodeDiskStatsEnabled ? new SlowDiskTracker(conf, timer) : null; @@ -356,6 +380,44 @@ public class DatanodeManager { DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY_DEFAULT); } + private void startSlowPeerCollector() { + if (slowPeerCollectorDaemon != null) { + return; + } + slowPeerCollectorDaemon = new Daemon(new Runnable() { + @Override + public void run() { + while (true) { + try { + slowNodesSet = getSlowPeers(); + } catch (Exception e) { + LOG.error("Failed to collect slow peers", e); + } + + try { + Thread.sleep(slowPeerCollectionInterval); + } catch (InterruptedException e) { + LOG.error("Slow peers collection thread interrupted", e); + return; + } + } + } + }); + slowPeerCollectorDaemon.start(); + } + + public void stopSlowPeerCollector() { + if (slowPeerCollectorDaemon == null) { + return; + } + slowPeerCollectorDaemon.interrupt(); + try { + slowPeerCollectorDaemon.join(); + } catch (InterruptedException e) { + LOG.error("Slow peers collection thread did not shutdown", e); + } + } + private static long getStaleIntervalFromConf(Configuration conf, long heartbeatExpireInterval) { long staleInterval = conf.getLong( @@ -401,6 +463,7 @@ void activate(final Configuration conf) { void close() { datanodeAdminManager.close(); heartbeatManager.close(); + stopSlowPeerCollector(); } /** @return the network topology. */ @@ -2019,6 +2082,48 @@ public String getSlowPeersReport() { return slowPeerTracker != null ? slowPeerTracker.getJson() : null; } + /** + * Returns all tracking slow peers. 
+ * @return + */ + public Set getSlowPeers() { + Set slowPeersSet = Sets.newConcurrentHashSet(); + if (slowPeerTracker == null) { + return slowPeersSet; + } + ArrayList slowNodes = + slowPeerTracker.getSlowNodes(maxSlowPeerReportNodes); + for (String slowNode : slowNodes) { + if (StringUtils.isBlank(slowNode) + || !slowNode.contains(IP_PORT_SEPARATOR)) { + continue; + } + String ipAddr = slowNode.split(IP_PORT_SEPARATOR)[0]; + DatanodeDescriptor datanodeByHost = + host2DatanodeMap.getDatanodeByHost(ipAddr); + if (datanodeByHost != null) { + slowPeersSet.add(datanodeByHost); + } + } + return slowPeersSet; + } + + /** + * Returns all tracking slow peers. + * @return + */ + public static Set getSlowNodes() { + return slowNodesSet; + } + + /** + * Use only for testing. + */ + @VisibleForTesting + public SlowPeerTracker getSlowPeerTracker() { + return slowPeerTracker; + } + /** * Use only for testing. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java index 06dd2c0102651..c4b1861024c7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java @@ -34,6 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -233,6 +234,23 @@ public SortedSet getReportingNodes() { } } + /** + * Returns all tracking slow peers. + * @param numNodes + * @return + */ + public ArrayList getSlowNodes(int numNodes) { + Collection jsonReports = getJsonReports(numNodes); + ArrayList slowNodes = new ArrayList<>(); + for (ReportForJson jsonReport : jsonReports) { + slowNodes.add(jsonReport.getSlowNode()); + } + if (!slowNodes.isEmpty()) { + LOG.warn("Slow nodes list: " + slowNodes); + } + return slowNodes; + } + /** * Retrieve reports in a structure for generating JSON, limiting the * output to the top numNodes nodes i.e nodes with the most reports. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index df4df489bfabb..57e3b1bcb1fe7 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2368,6 +2368,36 @@ + + dfs.namenode.block-placement-policy.exclude-slow-nodes.enabled + false + + If this is set to true, we will filter out slow nodes + when choosing targets for blocks. + + + + + dfs.namenode.max.slowpeer.collect.nodes + 5 + + How many slow nodes we will collect for filtering out + when choosing targets for blocks. + + It is ignored if dfs.namenode.block-placement-policy.exclude-slow-nodes.enabled is false. + + + + + dfs.namenode.slowpeer.collect.interval + 30m + + Interval at which the slow peer trackers runs in the background to collect slow peers. + + It is ignored if dfs.namenode.block-placement-policy.exclude-slow-nodes.enabled is false. 
+ + + dfs.datanode.fileio.profiling.sampling.percentage 0 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java new file mode 100644 index 0000000000000..f40317d8e70db --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.TestBlockStoragePolicy; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.net.Node; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Set; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +@RunWith(Parameterized.class) +public class TestReplicationPolicyExcludeSlowNodes + extends BaseReplicationPolicyTest { + + public TestReplicationPolicyExcludeSlowNodes(String blockPlacementPolicy) { + this.blockPlacementPolicy = blockPlacementPolicy; + } + + @Parameterized.Parameters + public static Iterable data() { + return Arrays.asList(new Object[][] { + {BlockPlacementPolicyDefault.class.getName()}, + {BlockPlacementPolicyWithUpgradeDomain.class.getName()}, + {AvailableSpaceBlockPlacementPolicy.class.getName()}, + {BlockPlacementPolicyRackFaultTolerant.class.getName()}, + {AvailableSpaceRackFaultTolerantBlockPlacementPolicy.class.getName()}, + }); + } + + @Override + DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) { + conf.setBoolean(DFSConfigKeys + .DFS_DATANODE_PEER_STATS_ENABLED_KEY, + true); + conf.setStrings(DFSConfigKeys + .DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY, + "1s"); + conf.setBoolean(DFSConfigKeys + .DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, + true); + final String[] racks = { + "/rack1", + "/rack2", + "/rack3", + "/rack4", + "/rack5", + "/rack6"}; + storages = DFSTestUtil.createDatanodeStorageInfos(racks); + return DFSTestUtil.toDatanodeDescriptor(storages); + } + + /** + * Tests that chooseTarget when excludeSlowNodesEnabled set to true. 
+ */ + @Test + public void testChooseTargetExcludeSlowNodes() throws Exception { + namenode.getNamesystem().writeLock(); + try { + // add nodes + for (int i = 0; i < dataNodes.length; i++) { + dnManager.addDatanode(dataNodes[i]); + } + + // mock slow nodes + SlowPeerTracker tracker = dnManager.getSlowPeerTracker(); + tracker.addReport(dataNodes[0].getInfoAddr(), dataNodes[3].getInfoAddr()); + tracker.addReport(dataNodes[0].getInfoAddr(), dataNodes[4].getInfoAddr()); + tracker.addReport(dataNodes[1].getInfoAddr(), dataNodes[4].getInfoAddr()); + tracker.addReport(dataNodes[1].getInfoAddr(), dataNodes[5].getInfoAddr()); + tracker.addReport(dataNodes[2].getInfoAddr(), dataNodes[3].getInfoAddr()); + tracker.addReport(dataNodes[2].getInfoAddr(), dataNodes[5].getInfoAddr()); + + // waiting for slow nodes collector run + Thread.sleep(3000); + + // fetch slow nodes + Set slowPeers = dnManager.getSlowPeers(); + + // assert slow nodes + assertEquals(3, slowPeers.size()); + for (int i = 0; i < slowPeers.size(); i++) { + assertTrue(slowPeers.contains(dataNodes[i])); + } + + // mock writer + DatanodeDescriptor writerDn = dataNodes[0]; + + // call chooseTarget() + DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager() + .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3, + writerDn, new ArrayList(), false, null, + 1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null); + + // assert targets + assertEquals(3, targets.length); + for (int i = 0; i < targets.length; i++) { + assertTrue(!slowPeers.contains(targets[i].getDatanodeDescriptor())); + } + } finally { + namenode.getNamesystem().writeUnlock(); + } + NameNode.LOG.info("Done working on it"); + } + +} From eaaf9eff28060340b8e6ef5e04c4e8495a4e78cf Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sun, 28 Mar 2021 16:13:59 +0530 Subject: [PATCH 154/666] HDFS-15764. Notify Namenode missing or new block on disk as soon as possible. Contributed by Yang Yun. 
--- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 5 ++ .../hadoop/hdfs/server/datanode/DNConf.java | 4 ++ .../fsdataset/impl/FsDatasetImpl.java | 25 ++++++++ .../src/main/resources/hdfs-default.xml | 8 +++ .../server/datanode/TestDirectoryScanner.java | 46 +++++++++----- .../fsdataset/impl/TestFsDatasetImpl.java | 62 ++++++++++++++++++- 6 files changed, 133 insertions(+), 17 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 23897a502f108..ac2896871e066 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -876,6 +876,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.datanode.directoryscan.throttle.limit.ms.per.sec"; public static final int DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_DEFAULT = -1; + public static final String + DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_KEY = + "dfs.datanode.directoryscan.max.notify.count"; + public static final long + DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_DEFAULT = 5; public static final String DFS_DATANODE_DNS_INTERFACE_KEY = "dfs.datanode.dns.interface"; public static final String DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default"; public static final String DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index b56dd4ec223fa..7902694d9094b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -459,4 +459,8 @@ public boolean getPmemCacheRecoveryEnabled() { public long getProcessCommandsThresholdMs() { return processCommandsThresholdMs; } + + public long getBlockReportInterval() { + return blockReportInterval; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index edf2edcb1b800..41791bb31a500 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -281,6 +281,11 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) final InstrumentedReadWriteLock datasetRWLock; private final Condition datasetWriteLockCondition; private static String blockPoolId = ""; + + // Make limited notify times from DirectoryScanner to NameNode. + private long maxDirScannerNotifyCount; + private long curDirScannerNotifyCount; + private long lastDirScannerNotifyTime; /** * An FSDataset has a directory where it loads its data files. 
@@ -408,6 +413,10 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) maxDataLength = conf.getInt( CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT); + maxDirScannerNotifyCount = conf.getLong( + DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_KEY, + DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_DEFAULT); + lastDirScannerNotifyTime = System.currentTimeMillis(); } @Override @@ -2609,6 +2618,11 @@ public void checkAndUpdate(String bpid, ScanInfo scanInfo) Block corruptBlock = null; ReplicaInfo memBlockInfo; long startTimeMs = Time.monotonicNow(); + if (startTimeMs - lastDirScannerNotifyTime > + datanode.getDnConf().getBlockReportInterval()) { + curDirScannerNotifyCount = 0; + lastDirScannerNotifyTime = startTimeMs; + } try (AutoCloseableLock lock = datasetWriteLock.acquire()) { memBlockInfo = volumeMap.get(bpid, blockId); if (memBlockInfo != null && @@ -2661,6 +2675,11 @@ public void checkAndUpdate(String bpid, ScanInfo scanInfo) // Block is in memory and not on the disk // Remove the block from volumeMap volumeMap.remove(bpid, blockId); + if (curDirScannerNotifyCount < maxDirScannerNotifyCount) { + curDirScannerNotifyCount++; + datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, + memBlockInfo), memBlockInfo.getStorageUuid()); + } if (vol.isTransientStorage()) { ramDiskReplicaTracker.discardReplica(bpid, blockId, true); } @@ -2687,6 +2706,12 @@ public void checkAndUpdate(String bpid, ScanInfo scanInfo) .setDirectoryToUse(diskFile.getParentFile()) .build(); volumeMap.add(bpid, diskBlockInfo); + if (curDirScannerNotifyCount < maxDirScannerNotifyCount) { + maxDirScannerNotifyCount++; + datanode.notifyNamenodeReceivedBlock( + new ExtendedBlock(bpid, diskBlockInfo), null, + vol.getStorageID(), vol.isTransientStorage()); + } if (vol.isTransientStorage()) { long lockedBytesReserved = cacheManager.reserve(diskBlockInfo.getNumBytes()) > 0 ? diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 57e3b1bcb1fe7..c2ae4bc829109 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -6191,4 +6191,12 @@ accessed or modified before the specified time interval. + + dfs.datanode.directoryscan.max.notify.count + 5 + + Defines the maximum number of blocks that the DirectoryScanner may notify + namenode right way for received or deleted blocks after one round. + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 12b251fbb4369..44d99a292b4b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -249,8 +249,7 @@ private String getMetaFile(long id) { } /** Create a block file in a random volume. 
*/ - private long createBlockFile() throws IOException { - long id = getFreeBlockId(); + private long createBlockFile(long id) throws IOException { try ( FsDatasetSpi.FsVolumeReferences volumes = fds.getFsVolumeReferences()) { int numVolumes = volumes.size(); @@ -282,8 +281,7 @@ private long createMetaFile() throws IOException { } /** Create block file and corresponding metafile in a rondom volume. */ - private long createBlockMetaFile() throws IOException { - long id = getFreeBlockId(); + private long createBlockMetaFile(long id) throws IOException { try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) { int numVolumes = refs.size(); @@ -548,7 +546,7 @@ public void runTest(int parallelism) throws Exception { // Test4: A block file exists for which there is no metafile and // a block in memory - blockId = createBlockFile(); + blockId = createBlockFile(blockId); totalBlocks++; scan(totalBlocks, 1, 1, 0, 1, 0); verifyAddition(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP, 0); @@ -563,8 +561,12 @@ public void runTest(int parallelism) throws Exception { scan(totalBlocks, 0, 0, 0, 0, 0); // Test6: A block file and metafile exists for which there is no block in - // memory - blockId = createBlockMetaFile(); + blockId = deleteBlockFile(); + scan(totalBlocks, 1, 0, 1, 0, 0); + totalBlocks--; + verifyDeletion(blockId); + + blockId = createBlockMetaFile(blockId); totalBlocks++; scan(totalBlocks, 1, 0, 0, 1, 0); verifyAddition(blockId, DEFAULT_GEN_STAMP, 0); @@ -577,9 +579,10 @@ public void runTest(int parallelism) throws Exception { scan(totalBlocks, 10, 10, 0, 0, 10); scan(totalBlocks, 0, 0, 0, 0, 0); - // Test8: Delete bunch of block files + // Test8: Delete bunch of block files and record the ids. + List ids = new ArrayList<>(); for (int i = 0; i < 10; i++) { - blockId = deleteBlockFile(); + ids.add(deleteBlockFile()); } scan(totalBlocks, 10, 0, 10, 0, 0); totalBlocks -= 10; @@ -587,7 +590,7 @@ public void runTest(int parallelism) throws Exception { // Test9: create a bunch of blocks files for (int i = 0; i < 10; i++) { - blockId = createBlockFile(); + blockId = createBlockFile(ids.get(i)); } totalBlocks += 10; scan(totalBlocks, 10, 10, 0, 10, 0); @@ -601,8 +604,15 @@ public void runTest(int parallelism) throws Exception { scan(totalBlocks, 0, 0, 0, 0, 0); // Test11: create a bunch block files and meta files + ids.clear(); for (int i = 0; i < 10; i++) { - blockId = createBlockMetaFile(); + ids.add(deleteBlockFile()); + } + scan(totalBlocks, 10, 0, 10, 0, 0); + totalBlocks -= 10; + + for (int i = 0; i < 10; i++) { + blockId = createBlockMetaFile(ids.get(i)); } totalBlocks += 10; scan(totalBlocks, 10, 0, 0, 10, 0); @@ -616,9 +626,16 @@ public void runTest(int parallelism) throws Exception { scan(totalBlocks, 0, 0, 0, 0, 0); // Test13: all the conditions combined + long blockId1 = deleteBlockFile(); + long blockId2 = deleteBlockFile(); + scan(totalBlocks, 2, 0, 2, 0, 0); + totalBlocks -= 2; + verifyDeletion(blockId1); + verifyDeletion(blockId2); + createMetaFile(); - createBlockFile(); - createBlockMetaFile(); + createBlockFile(blockId1); + createBlockMetaFile(blockId2); deleteMetaFile(); deleteBlockFile(); truncateBlockFile(); @@ -631,9 +648,6 @@ public void runTest(int parallelism) throws Exception { assertTrue("Report complier threads logged no execution time", scanner.timeRunningMs.get() > 0L); - // Test15: validate clean shutdown of DirectoryScanner - //// assertTrue(scanner.getRunStatus()); //assumes "real" FSDataset, not - // sim scanner.shutdown(); 
assertFalse(scanner.getRunStatus()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 8edc977a6bd70..6ae6248d3f997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -22,6 +22,7 @@ import java.util.function.Supplier; import org.apache.hadoop.fs.DF; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @@ -94,6 +95,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; import static org.hamcrest.core.Is.is; @@ -1745,4 +1747,62 @@ public void testGetMetadataLengthOfFinalizedReplica() throws IOException { assertTrue(blockDir.delete()); } } -} + + @Test + public void testNotifyNamenodeMissingOrNewBlock() throws Exception { + long blockSize = 1024; + int heatbeatInterval = 1; + HdfsConfiguration c = new HdfsConfiguration(); + c.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, heatbeatInterval); + c.setLong(DFS_BLOCK_SIZE_KEY, blockSize); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(c). + numDataNodes(1).build(); + try { + cluster.waitActive(); + DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/f1"), + blockSize, (short)1, 0); + String bpid = cluster.getNameNode().getNamesystem().getBlockPoolId(); + DataNode dn = cluster.getDataNodes().get(0); + FsDatasetSpi fsdataset = dn.getFSDataset(); + List replicaInfos = + fsdataset.getSortedFinalizedBlocks(bpid); + assertEquals(1, replicaInfos.size()); + + ReplicaInfo replicaInfo = replicaInfos.get(0); + String blockPath = replicaInfo.getBlockURI().getPath(); + String metaPath = replicaInfo.getMetadataURI().getPath(); + String blockTempPath = blockPath + ".tmp"; + String metaTempPath = metaPath + ".tmp"; + File blockFile = new File(blockPath); + File blockTempFile = new File(blockTempPath); + File metaFile = new File(metaPath); + File metaTempFile = new File(metaTempPath); + + // remove block and meta file of the block + blockFile.renameTo(blockTempFile); + metaFile.renameTo(metaTempFile); + assertFalse(blockFile.exists()); + assertFalse(metaFile.exists()); + + FsVolumeSpi.ScanInfo info = new FsVolumeSpi.ScanInfo( + replicaInfo.getBlockId(), blockFile.getAbsoluteFile(), + metaFile.getAbsoluteFile(), replicaInfo.getVolume()); + fsdataset.checkAndUpdate(bpid, info); + + BlockManager blockManager = cluster.getNameNode(). 
+ getNamesystem().getBlockManager(); + GenericTestUtils.waitFor(() -> + blockManager.getLowRedundancyBlocksCount() == 1, 100, 5000); + + // move the block and meta file back + blockTempFile.renameTo(blockFile); + metaTempFile.renameTo(metaFile); + + fsdataset.checkAndUpdate(bpid, info); + GenericTestUtils.waitFor(() -> + blockManager.getLowRedundancyBlocksCount() == 0, 100, 5000); + } finally { + cluster.shutdown(); + } + } +} \ No newline at end of file From c5b70c3a1d9cbb9c55a80e567bf7262bb28409b6 Mon Sep 17 00:00:00 2001 From: Harunobu Daikoku Date: Mon, 29 Mar 2021 11:43:29 +0900 Subject: [PATCH 155/666] HDFS-15900. RBF: empty blockpool id on dfsrouter caused by UNAVAILABLE NameNode. (#2787) Co-authored-by: Harunobu Daikoku --- .../resolver/FederationNamespaceInfo.java | 46 +++++++++++++++- .../store/impl/MembershipStoreImpl.java | 15 +++--- .../resolver/TestFederationNamespaceInfo.java | 39 ++++++++++++++ .../store/TestStateStoreMembershipState.java | 54 +++++++++++++++++++ 4 files changed, 147 insertions(+), 7 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestFederationNamespaceInfo.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java index 33edd30ec2e5f..1ef159cf8f2e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs.server.federation.resolver; +import org.apache.commons.lang3.builder.CompareToBuilder; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hdfs.server.federation.router.RemoteLocationContext; /** @@ -75,4 +78,45 @@ public String getBlockPoolId() { public String toString() { return this.nameserviceId + "->" + this.blockPoolId + ":" + this.clusterId; } -} \ No newline at end of file + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj == this) { + return true; + } + if (obj.getClass() != getClass()) { + return false; + } + FederationNamespaceInfo other = (FederationNamespaceInfo) obj; + return new EqualsBuilder() + .append(nameserviceId, other.nameserviceId) + .append(clusterId, other.clusterId) + .append(blockPoolId, other.blockPoolId) + .isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 31) + .append(nameserviceId) + .append(clusterId) + .append(blockPoolId) + .toHashCode(); + } + + @Override + public int compareTo(RemoteLocationContext info) { + if (info instanceof FederationNamespaceInfo) { + FederationNamespaceInfo other = (FederationNamespaceInfo) info; + return new CompareToBuilder() + .append(nameserviceId, other.nameserviceId) + .append(clusterId, other.clusterId) + .append(blockPoolId, other.blockPoolId) + .toComparison(); + } + return super.compareTo(info); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java index 57b7b618b04fe..a63a0f3b3ab0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java @@ -213,12 +213,15 @@ public boolean loadCache(boolean force) throws IOException { nnRegistrations.put(nnId, nnRegistration); } nnRegistration.add(membership); - String bpId = membership.getBlockPoolId(); - String cId = membership.getClusterId(); - String nsId = membership.getNameserviceId(); - FederationNamespaceInfo nsInfo = - new FederationNamespaceInfo(bpId, cId, nsId); - this.activeNamespaces.add(nsInfo); + if (membership.getState() + != FederationNamenodeServiceState.UNAVAILABLE) { + String bpId = membership.getBlockPoolId(); + String cId = membership.getClusterId(); + String nsId = membership.getNameserviceId(); + FederationNamespaceInfo nsInfo = + new FederationNamespaceInfo(bpId, cId, nsId); + this.activeNamespaces.add(nsInfo); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestFederationNamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestFederationNamespaceInfo.java new file mode 100644 index 0000000000000..72681230c8b0d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestFederationNamespaceInfo.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import org.junit.Test; + +import java.util.Set; +import java.util.TreeSet; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TestFederationNamespaceInfo { + /** + * Regression test for HDFS-15900. 
+ */ + @Test + public void testHashCode() { + Set set = new TreeSet<>(); + // set an empty bpId first + set.add(new FederationNamespaceInfo("", "nn1", "ns1")); + set.add(new FederationNamespaceInfo("bp1", "nn2", "ns1")); + assertThat(set).hasSize(2); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java index 857cc2362d75d..63bc6235a6116 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java @@ -33,13 +33,17 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamenodeRegistrationsResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.NamenodeHeartbeatRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.NamenodeHeartbeatResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateNamenodeRegistrationRequest; @@ -473,6 +477,56 @@ public void testRegistrationExpiredAndDeletion() }, 100, 3000); } + @Test + public void testNamespaceInfoWithUnavailableNameNodeRegistration() + throws IOException { + // Populate the state store with one ACTIVE NameNode entry + // and one UNAVAILABLE NameNode entry + // 1) ns0:nn0 - ACTIVE + // 2) ns0:nn1 - UNAVAILABLE + List registrationList = new ArrayList<>(); + String router = ROUTERS[0]; + String ns = NAMESERVICES[0]; + String rpcAddress = "testrpcaddress"; + String serviceAddress = "testserviceaddress"; + String lifelineAddress = "testlifelineaddress"; + String blockPoolId = "testblockpool"; + String clusterId = "testcluster"; + String webScheme = "http"; + String webAddress = "testwebaddress"; + boolean safemode = false; + + MembershipState record = MembershipState.newInstance( + router, ns, NAMENODES[0], clusterId, blockPoolId, + rpcAddress, serviceAddress, lifelineAddress, webScheme, + webAddress, FederationNamenodeServiceState.ACTIVE, safemode); + registrationList.add(record); + + // Set empty clusterId and blockPoolId for UNAVAILABLE NameNode + record = MembershipState.newInstance( + router, ns, NAMENODES[1], "", "", + rpcAddress, serviceAddress, lifelineAddress, webScheme, + webAddress, FederationNamenodeServiceState.UNAVAILABLE, safemode); + registrationList.add(record); + + registerAndLoadRegistrations(registrationList); + + GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance(); + GetNamespaceInfoResponse response + = membershipStore.getNamespaceInfo(request); + Set namespaces = response.getNamespaceInfo(); + + // Verify 
only one namespace is registered + assertEquals(1, namespaces.size()); + + // Verify the registered namespace has a valid pair of clusterId + // and blockPoolId derived from ACTIVE NameNode + FederationNamespaceInfo namespace = namespaces.iterator().next(); + assertEquals(ns, namespace.getNameserviceId()); + assertEquals(clusterId, namespace.getClusterId()); + assertEquals(blockPoolId, namespace.getBlockPoolId()); + } + /** * Get a single namenode membership record from the store. * From 3ee0f01a0d7d01b64af12f9018cbe7bfe29aefd2 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Mon, 29 Mar 2021 23:08:59 +0530 Subject: [PATCH 156/666] HDFS-15922. Use memcpy for copying non-null terminated string. (#2827) --- .../src/main/native/fuse-dfs/fuse_init.c | 2 +- .../src/main/native/libhdfs-tests/test_libhdfs_ops.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_init.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_init.c index 4da6da0fa91d9..205a7f40fded0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_init.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_init.c @@ -74,7 +74,7 @@ static void init_protectedpaths(dfs_context *dfs) } dfs->protectedpaths[j] = (char*)malloc(sizeof(char)*length+1); assert(dfs->protectedpaths[j]); - strncpy(dfs->protectedpaths[j], tmp, length); + memcpy(dfs->protectedpaths[j], tmp, length); dfs->protectedpaths[j][length] = '\0'; if (eos) { tmp = eos + 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c index 23fa2e51128ba..a3058bbe6ec06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c @@ -58,7 +58,7 @@ void permission_disp(short permissions, char *rtr) { default: perm = "???"; } - strncpy(rtr, perm, 3); + memcpy(rtr, perm, 3); rtr+=3; } } From 425d2641a3aa545dd6042a2a8af0a0c1b975d250 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 29 Mar 2021 23:22:54 +0530 Subject: [PATCH 157/666] HDFS-15926 : Removed duplicate dependency of hadoop-annotations (#2823). Contributed by Viraj Jasani. Signed-off-by: Ayush Saxena --- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 96616254144a3..95ac71bae22c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -35,11 +35,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.hadoop - hadoop-annotations - provided - org.apache.hadoop hadoop-auth From c9d0dac91daef73dbe6807c517b6ec94b1aa0e6a Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 30 Mar 2021 09:39:00 +0530 Subject: [PATCH 158/666] YARN-10437. 
Destroy yarn service if any YarnException occurs during submitApp.Contributed by D M Murali Krishna Reddy --- .../yarn/service/client/ServiceClient.java | 16 +++++++- .../yarn/service/TestYarnNativeServices.java | 41 +++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index 6108338f25712..901b81fe14946 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -557,7 +557,13 @@ public ApplicationId actionCreate(Service service) // Write the definition first and then submit - AM will read the definition ServiceApiUtil.createDirAndPersistApp(fs, appDir, service); - ApplicationId appId = submitApp(service); + ApplicationId appId; + try { + appId = submitApp(service); + } catch(YarnException e){ + actionDestroy(serviceName); + throw e; + } cachedAppInfo.put(serviceName, new AppInfo(appId, service .getKerberosPrincipal().getPrincipalName())); service.setId(appId.toString()); @@ -1362,7 +1368,13 @@ public ApplicationId actionStartAndGetId(String serviceName) throws ServiceApiUtil.validateAndResolveService(service, fs, getConfig()); // see if it is actually running and bail out; verifyNoLiveAppInRM(serviceName, "start"); - ApplicationId appId = submitApp(service); + ApplicationId appId; + try { + appId = submitApp(service); + } catch (YarnException e) { + actionDestroy(serviceName); + throw e; + } cachedAppInfo.put(serviceName, new AppInfo(appId, service .getKerberosPrincipal().getPrincipalName())); service.setId(appId.toString()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java index 40b411e460ee9..45318b267c38f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -41,10 +41,12 @@ import org.apache.hadoop.yarn.service.api.records.PlacementPolicy; import org.apache.hadoop.yarn.service.api.records.PlacementScope; import org.apache.hadoop.yarn.service.api.records.PlacementType; +import org.apache.hadoop.yarn.service.api.records.Resource; import org.apache.hadoop.yarn.service.api.records.Service; import org.apache.hadoop.yarn.service.api.records.ServiceState; import org.apache.hadoop.yarn.service.client.ServiceClient; import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.exceptions.SliderException; import 
org.apache.hadoop.yarn.service.utils.ServiceApiUtil; import org.apache.hadoop.yarn.service.utils.SliderFileSystem; import org.hamcrest.CoreMatchers; @@ -982,4 +984,43 @@ public void testAMFailureValidity() throws Exception { Assert.assertEquals(ServiceState.STABLE, client.getStatus( exampleApp.getName()).getState()); } + + public Service createServiceWithSingleComp(int memory){ + Service service = new Service(); + service.setName("example-app"); + service.setVersion("v1"); + Component component = new Component(); + component.setName("sleep"); + component.setNumberOfContainers(1L); + component.setLaunchCommand("sleep 1000"); + org.apache.hadoop.yarn.service.api.records.Resource resource = new Resource(); + resource.setMemory(Integer.toString(memory)); + resource.setCpus(1); + component.setResource(resource); + service.addComponent(component); + return service; + } + + @Test(timeout = 200000) + public void testServiceSameNameWithFailure() throws Exception{ + setupInternal(NUM_NMS); + ServiceClient client = createClient(getConf()); + try { + client.actionCreate(createServiceWithSingleComp(1024000)); + Assert.fail("Service should throw YarnException as memory is " + + "configured as 1000GB, which is more than allowed"); + } catch (YarnException e) { + Assert.assertTrue(true); + } + Service service = createServiceWithSingleComp(128); + try { + client.actionCreate(service); + } catch (SliderException e){ + Assert.fail("Not able to submit service as the files related to" + + " failed service with same name are not cleared"); + } + waitForServiceToBeStable(client,service); + client.actionStop(service.getName(), true); + client.actionDestroy(service.getName()); + } } From 6a1838cb34c7bf3e18d395177b9f27bba003a45e Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 30 Mar 2021 09:46:12 +0530 Subject: [PATCH 159/666] YARN-10439. Yarn Service AM listens on all IP's on the machine. 
Contributed by D M Murali Krishna Reddy --- .../hadoop/yarn/service/ClientAMService.java | 19 ++++++++++++++----- .../hadoop/yarn/service/ServiceMaster.java | 7 ++++++- .../yarn/service/conf/YarnServiceConf.java | 2 ++ .../hadoop/yarn/service/MockServiceAM.java | 10 ++++++++++ .../markdown/yarn-service/Configurations.md | 1 + 5 files changed, 33 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java index 72ac550ab5cc6..342d8d853d88e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.service; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; @@ -53,8 +54,10 @@ import org.apache.hadoop.yarn.service.component.ComponentEvent; import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; import org.apache.hadoop.yarn.service.utils.FilterUtils; import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,6 +67,7 @@ import static org.apache.hadoop.yarn.service.component.ComponentEventType.DECOMMISSION_INSTANCE; import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_AM_CLIENT_PORT_RANGE; public class ClientAMService extends AbstractService implements ClientAMProtocol { @@ -84,9 +88,11 @@ public ClientAMService(ServiceContext context) { @Override protected void serviceStart() throws Exception { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - InetSocketAddress address = new InetSocketAddress(0); + String nodeHostString = getNMHostName(); + + InetSocketAddress address = new InetSocketAddress(nodeHostString, 0); server = rpc.getServer(ClientAMProtocol.class, this, address, conf, - context.secretManager, 1); + context.secretManager, 1, YARN_SERVICE_AM_CLIENT_PORT_RANGE); // Enable service authorization? 
if (conf.getBoolean( @@ -97,9 +103,6 @@ public ClientAMService(ServiceContext context) { server.start(); - String nodeHostString = - System.getenv(ApplicationConstants.Environment.NM_HOST.name()); - bindAddress = NetUtils.createSocketAddrForHost(nodeHostString, server.getListenerAddress().getPort()); @@ -107,6 +110,12 @@ public ClientAMService(ServiceContext context) { super.serviceStart(); } + @VisibleForTesting + String getNMHostName() throws BadClusterStateException { + return ServiceUtils.mandatoryEnvVariable( + ApplicationConstants.Environment.NM_HOST.name()); + } + @Override protected void serviceStop() throws Exception { if (server != null) { server.stop(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java index 670fc21f0dcc3..3120fad7870c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java @@ -129,7 +129,7 @@ protected void serviceInit(Configuration conf) throws Exception { DefaultMetricsSystem.initialize("ServiceAppMaster"); context.secretManager = new ClientToAMTokenSecretManager(attemptId, null); - ClientAMService clientAMService = new ClientAMService(context); + ClientAMService clientAMService = createClientAMService(); context.clientAMService = clientAMService; addService(clientAMService); @@ -143,6 +143,11 @@ protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); } + @VisibleForTesting + protected ClientAMService createClientAMService() { + return new ClientAMService(context); + } + // Record the tokens and use them for launching containers. // e.g. 
localization requires the hdfs delegation tokens @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java index 86c4de2ef8995..d3716b919337d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java @@ -49,6 +49,8 @@ public class YarnServiceConf { public static final long DEFAULT_AM_FAILURES_VALIDITY_INTERVAL = -1; public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory"; public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024; + public static final String YARN_SERVICE_AM_CLIENT_PORT_RANGE = + YARN_SERVICE_PREFIX + "am.client.port-range"; public static final String YARN_QUEUE = "yarn.service.queue"; public static final String DEFAULT_YARN_QUEUE = "default"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java index cf2b1f28920d8..de1a4b98de4d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java @@ -129,6 +129,16 @@ protected Path getAppDir() { return path; } + @Override + protected ClientAMService createClientAMService() { + return new ClientAMService(context) { + @Override + String getNMHostName() { + return "0.0.0.0"; + } + }; + } + @Override protected ServiceScheduler createServiceScheduler(ServiceContext context) throws IOException, YarnException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md index 53ffa07ab43f9..e81170322de26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md @@ -132,6 +132,7 @@ The above config allows the service AM to be retried a maximum of 10 times. |yarn.service.rolling-log.include-pattern | Regex expression for including log files by name when aggregating the logs while app is running.| |yarn.service.rolling-log.exclude-pattern | Regex expression for excluding log files by name when aggregating the logs while app is running. If the log file name matches both include and exclude pattern, this file will be excluded.| |yarn.service.classpath | Comma separated extra class path parameters for yarn services AM. These path elements will be appended to the end of the YARN service AM classpath. 
| +|yarn.service.am.client.port-range | Range of ports that the Yarn Service AM can use when binding. Leave blank if you want all possible ports. For example 50000-50050,50100-50200. | ### Component-level configuration properties Component-level service AM configuration properties can be specified either in the cluster `yarn-site.xml` at the global level (effectively overriding the default values system-wide), specified per service in the `properties` field of the `Configuration` object, or specified per component in the `properties` field of the component's `Configuration` object. From 23acda850ad0ddb617ff01f38f18f4591a4b7e2b Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 30 Mar 2021 09:51:47 +0530 Subject: [PATCH 160/666] YARN-10441. Add support for hadoop.http.rmwebapp.scheduler.page.class. Contributed by D M Murali Krishna Reddy --- .../org/apache/hadoop/yarn/conf/YarnConfiguration.java | 10 ++++++++++ .../hadoop/yarn/conf/TestYarnConfigurationFields.java | 3 +++ 2 files changed, 13 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 1888ffb64c045..2cf4a3bccbfab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -120,6 +120,8 @@ private static void addDeprecatedKeys() { CommonConfigurationKeys.ZK_TIMEOUT_MS), new DeprecationDelta(RM_ZK_RETRY_INTERVAL_MS, CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS), + new DeprecationDelta(HADOOP_HTTP_WEBAPP_SCHEDULER_PAGE, + YARN_HTTP_WEBAPP_SCHEDULER_PAGE) }); Configuration.addDeprecations(new DeprecationDelta[] { new DeprecationDelta("yarn.resourcemanager.display.per-user-apps", @@ -2487,6 +2489,14 @@ public static boolean isAclEnabled(Configuration conf) { public static final String YARN_HTTP_WEBAPP_EXTERNAL_CLASSES = "yarn.http.rmwebapp.external.classes"; + /** + * @deprecated This field is deprecated for + * {@link #YARN_HTTP_WEBAPP_SCHEDULER_PAGE} + */ + @Deprecated + public static final String HADOOP_HTTP_WEBAPP_SCHEDULER_PAGE = + "hadoop.http.rmwebapp.scheduler.page.class"; + public static final String YARN_HTTP_WEBAPP_SCHEDULER_PAGE = "yarn.http.rmwebapp.scheduler.page.class"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index 9fda8094c9b78..3dcd5cc3bed60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -157,6 +157,9 @@ public void initializeMemberVariables() { configurationPropsToSkipCompare .add(YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE); + configurationPropsToSkipCompare + .add(YarnConfiguration.HADOOP_HTTP_WEBAPP_SCHEDULER_PAGE); + // Ignore NodeManager "work in progress" variables configurationPrefixToSkipCompare .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED); From 21f54b42efb1986dd1e2b9a7af407956c807625f Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 30 Mar 
2021 13:48:40 +0530 Subject: [PATCH 161/666] YARN-10439. addendum fix for shaded guva. --- .../java/org/apache/hadoop/yarn/service/ClientAMService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java index 342d8d853d88e..a06a0e69c0c60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; From f77c9c33b708f3f39291f211cdc1061a8eb30412 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Tue, 30 Mar 2021 13:52:07 +0530 Subject: [PATCH 162/666] YARN-10466.Fix NullPointerException in yarn-services Component.java. Contributed by D M Murali Krishna Reddy --- .../org/apache/hadoop/yarn/service/component/Component.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java index 0b0ba7945d654..0e031f4ba51e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java @@ -830,6 +830,11 @@ public void requestContainers(long count) { targetExpressions.toArray(new TargetExpression[0])).build(); break; } + if (constraint == null) { + LOG.info("[COMPONENT {}] Placement constraint: null ", + componentSpec.getName()); + continue; + } // The default AND-ed final composite constraint if (finalConstraint != null) { finalConstraint = PlacementConstraints From 459467c94419085a1b2b281497c5fdd621207e4c Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 30 Mar 2021 17:46:51 +0900 Subject: [PATCH 163/666] YARN-10716. Fix typo in ContainerRuntime. Contributed by xishuhai. 
--- .../nodemanager/containermanager/runtime/ContainerRuntime.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java index 92fd86d151471..b024ada8397bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java @@ -30,7 +30,7 @@ * An abstraction for various container runtime implementations. Examples * include Process Tree, Docker, Appc runtimes etc. These implementations * are meant for low-level OS container support - dependencies on - * higher-level node mananger constructs should be avoided. + * higher-level node manager constructs should be avoided. */ @InterfaceAudience.Private From 6eada4dd5f5e25f7858552c41d513fd7f7eb150a Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 30 Mar 2021 23:07:10 +0530 Subject: [PATCH 164/666] HDFS-15928. Replace RAND_pseudo_bytes in rpc_engine.cc (#2825) --- .../src/main/native/libhdfspp/lib/rpc/request.cc | 8 +++++++- .../main/native/libhdfspp/lib/rpc/rpc_engine.cc | 15 +++++++++++---- .../main/native/libhdfspp/lib/rpc/rpc_engine.h | 8 ++++---- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc index 2de26fd0eab21..ecac2bcb00c59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc @@ -100,13 +100,19 @@ static void SetRequestHeader(std::weak_ptr weak_engine, int c return; } + const auto& client_id = counted_engine->client_id(); + if (client_id == nullptr) { + LOG_ERROR(kRPC, << "Failed to generate client ID"); + return; + } + rpc_header->set_rpckind(RPC_PROTOCOL_BUFFER); rpc_header->set_rpcop(RpcRequestHeaderProto::RPC_FINAL_PACKET); rpc_header->set_callid(call_id); if (retry_count != kNoRetry) { rpc_header->set_retrycount(retry_count); } - rpc_header->set_clientid(counted_engine->client_id()); + rpc_header->set_clientid(*client_id); req_header->set_methodname(method_name); req_header->set_declaringclassprotocolname(counted_engine->protocol_name()); req_header->set_clientprotocolversion(counted_engine->protocol_version()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc index 06cda962cf9b0..065dffad96baa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc @@ -23,8 +23,11 @@ #include "common/optional_wrapper.h" #include +#include #include +#include +#include namespace hdfs { @@ -111,8 +114,7 @@ 
std::unique_ptr RpcEngine::MakeRetryPolicy(const Options &opt } } -std::string RpcEngine::getRandomClientId() -{ +std::unique_ptr RpcEngine::getRandomClientId() { /** * The server is requesting a 16-byte UUID: * https://github.com/c9n/hadoop/blob/master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java @@ -121,14 +123,19 @@ std::string RpcEngine::getRandomClientId() * https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 **/ std::vectorbuf(16); - RAND_pseudo_bytes(&buf[0], buf.size()); + if (RAND_bytes(&buf[0], static_cast(buf.size())) != 1) { + const auto *error = ERR_reason_error_string(ERR_get_error()); + LOG_ERROR(kRPC, << "Unable to generate random client ID, err : " << error); + return nullptr; + } //clear the first four bits of byte 6 then set the second bit buf[6] = (buf[6] & 0x0f) | 0x40; //clear the second bit of byte 8 and set the first bit buf[8] = (buf[8] & 0xbf) | 0x80; - return std::string(reinterpret_cast(&buf[0]), buf.size()); + return std::unique_ptr( + new std::string(reinterpret_cast(&buf[0]), buf.size())); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h index 13e56c5b92fb8..3bf7dcabda063 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h @@ -80,7 +80,7 @@ class LockFreeRpcEngine { virtual int NextCallId() = 0; virtual const std::string &client_name() = 0; - virtual const std::string &client_id() = 0; + virtual const std::unique_ptr &client_id() = 0; virtual const std::string &user_name() = 0; virtual const std::string &protocol_name() = 0; virtual int protocol_version() = 0; @@ -142,7 +142,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< std::unique_ptr TEST_GenerateRetryPolicyUsingOptions(); const std::string &client_name() override { return client_name_; } - const std::string &client_id() override { return client_id_; } + const std::unique_ptr &client_id() override { return client_id_; } const std::string &user_name() override { return auth_info_.getUser(); } const std::string &protocol_name() override { return protocol_name_; } int protocol_version() override { return protocol_version_; } @@ -157,7 +157,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< virtual std::shared_ptr NewConnection(); virtual std::unique_ptr MakeRetryPolicy(const Options &options); - static std::string getRandomClientId(); + static std::unique_ptr getRandomClientId(); // Remember all of the last endpoints in case we need to reconnect and retry std::vector last_endpoints_; @@ -166,7 +166,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< mutable std::shared_ptr io_service_; const Options options_; const std::string client_name_; - const std::string client_id_; + const std::unique_ptr client_id_; const std::string protocol_name_; const int protocol_version_; std::unique_ptr retry_policy_; //null --> no retry From 6c6ce89a19732699c8b9f046475bf318d7109d98 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Wed, 31 Mar 2021 02:05:45 +0530 Subject: [PATCH 165/666] HDFS-15927. 
Catch polymorphic type by reference (#2824) --- .../main/native/libhdfspp/lib/common/hdfs_configuration.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/hdfs_configuration.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/hdfs_configuration.cc index 07e2edc68563c..7b1496c98cd5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/hdfs_configuration.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/hdfs_configuration.cc @@ -135,7 +135,7 @@ std::vector HdfsConfiguration::LookupNameService(const std::string URI uri; try { uri = URI::parse_from_string(PrependHdfsScheme(Get(dom_node_name))); - } catch (const uri_parse_error) { + } catch (const uri_parse_error&) { throw ha_parse_error("unable to find " + dom_node_name); } @@ -148,7 +148,7 @@ std::vector HdfsConfiguration::LookupNameService(const std::string NamenodeInfo node(nameservice, *node_id, uri); namenodes.push_back(node); } - } catch (ha_parse_error e) { + } catch (const ha_parse_error& e) { LOG_ERROR(kRPC, << "HA cluster detected but failed because : " << e.what()); namenodes.clear(); // Don't return inconsistent view } From 1a81a0b661a37eeb5a450f92c67f02f5fd7f19b7 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Wed, 31 Mar 2021 02:06:02 +0530 Subject: [PATCH 166/666] HDFS-15929. Replace RAND_pseudo_bytes in util.cc (#2826) --- .../main/native/libhdfspp/lib/common/util.cc | 14 ++--- .../main/native/libhdfspp/lib/common/util.h | 3 +- .../native/libhdfspp/lib/fs/filehandle.cc | 11 +++- .../main/native/libhdfspp/lib/fs/filehandle.h | 13 +++-- .../main/native/libhdfspp/lib/fs/filesystem.h | 3 +- .../libhdfspp/lib/fs/namenode_operations.h | 4 +- .../libhdfspp/lib/rpc/rpc_connection_impl.cc | 8 ++- .../native/libhdfspp/lib/rpc/rpc_engine.cc | 3 +- .../native/libhdfspp/lib/rpc/rpc_engine.h | 9 ++-- .../libhdfspp/tests/bad_datanode_test.cc | 28 ++++++++-- .../tests/remote_block_reader_test.cc | 16 ++++-- .../native/libhdfspp/tests/rpc_engine_test.cc | 54 +++++++++++++------ 12 files changed, 119 insertions(+), 47 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc index 7a4b4cf33efed..8f6e77a53959f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc @@ -26,7 +26,7 @@ #include #include #include - +#include namespace hdfs { @@ -73,18 +73,20 @@ int DelimitedPBMessageSize(const ::google::protobuf::MessageLite *msg) { return ::google::protobuf::io::CodedOutputStream::VarintSize32(size) + size; } -std::string GetRandomClientName() { +std::shared_ptr GetRandomClientName() { std::vectorbuf(8); - RAND_pseudo_bytes(&buf[0], 8); + if (RAND_bytes(&buf[0], static_cast(buf.size())) != 1) { + return nullptr; + } std::ostringstream oss; oss << "DFSClient_" << getpid() << "_" << std::this_thread::get_id() << "_" << std::setw(2) << std::hex << std::uppercase << std::setfill('0'); - for (unsigned char b: buf) + for (auto b : buf) { oss << static_cast(b); - - return oss.str(); + } + return std::make_shared(oss.str()); } std::string Base64Encode(const std::string &src) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h index a7f4f958e79d5..140f66e8482d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h @@ -22,6 +22,7 @@ #include "common/logging.h" #include +#include #include #include @@ -61,7 +62,7 @@ std::string SerializeDelimitedProtobufMessage(const ::google::protobuf::MessageL std::string Base64Encode(const std::string &src); // Return a new high-entropy client name -std::string GetRandomClientName(); +std::shared_ptr GetRandomClientName(); // Returns true if _someone_ is holding the lock (not necessarily this thread, // but a std::mutex doesn't track which thread is holding the lock) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc index 169def364b732..7c9e24c0d883a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc @@ -24,6 +24,8 @@ #include "hdfspp/events.h" #include +#include +#include #include #include @@ -38,7 +40,7 @@ FileHandle::~FileHandle() {} FileHandleImpl::FileHandleImpl(const std::string & cluster_name, const std::string & path, - std::shared_ptr io_service, const std::string &client_name, + std::shared_ptr io_service, const std::shared_ptr &client_name, const std::shared_ptr file_info, std::shared_ptr bad_data_nodes, std::shared_ptr event_handlers) @@ -191,6 +193,11 @@ void FileHandleImpl::AsyncPreadSome( return; } + if (client_name_ == nullptr) { + handler(Status::Error("AsyncPreadSome: Unable to generate random client name"), "", 0); + return; + } + /** * Note: block and chosen_dn will end up pointing to things inside * the blocks_ vector. They shouldn't be directly deleted. 
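For reference, the 16-byte client ID that getRandomClientId() builds above mirrors the version-4 UUID format referenced in the ClientId.java link. A minimal Java sketch of the same idea (a cryptographically strong 16-byte ID with the UUID version and variant bits set, matching the buf[6]/buf[8] manipulation shown in rpc_engine.cc) could look like this; it is illustrative only, not the actual ClientId.java implementation:

import java.security.SecureRandom;
import java.util.Arrays;

public class ClientIdSketch {
  // Illustrative sketch: build a 16-byte version-4 UUID the way
  // RpcEngine::getRandomClientId() does after switching to RAND_bytes.
  public static byte[] randomClientId() {
    byte[] buf = new byte[16];
    new SecureRandom().nextBytes(buf);  // cryptographically strong, like RAND_bytes
    // clear the first four bits of byte 6 then set the second bit (UUID version 4)
    buf[6] = (byte) ((buf[6] & 0x0f) | 0x40);
    // clear the second bit of byte 8 and set the first bit (RFC 4122 variant)
    buf[8] = (byte) ((buf[8] & 0xbf) | 0x80);
    return buf;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(randomClientId()));
  }
}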
@@ -245,7 +252,7 @@ void FileHandleImpl::AsyncPreadSome( // steal the FileHandle's dn and put it back when we're done std::shared_ptr dn = CreateDataNodeConnection(io_service_, chosen_dn, &block->blocktoken()); std::string dn_id = dn->uuid_; - std::string client_name = client_name_; + std::string client_name = *client_name_; // Wrap the DN in a block reader to handle the state and logic of the // block request protocol diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h index 57da237f977c7..724b1a14bc21d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h @@ -29,7 +29,9 @@ #include "bad_datanode_tracker.h" #include "ClientNamenodeProtocol.pb.h" +#include #include +#include namespace hdfs { @@ -51,10 +53,11 @@ class FileHandleImpl : public FileHandle { MEMCHECKED_CLASS(FileHandleImpl) FileHandleImpl(const std::string & cluster_name, const std::string & path, - std::shared_ptr io_service, const std::string &client_name, - const std::shared_ptr file_info, - std::shared_ptr bad_data_nodes, - std::shared_ptr event_handlers); + std::shared_ptr io_service, + const std::shared_ptr &client_name, + const std::shared_ptr file_info, + std::shared_ptr bad_data_nodes, + std::shared_ptr event_handlers); /* * Reads the file at the specified offset into the buffer. @@ -129,7 +132,7 @@ class FileHandleImpl : public FileHandle { const std::string cluster_name_; const std::string path_; std::shared_ptr io_service_; - const std::string client_name_; + const std::shared_ptr client_name_; const std::shared_ptr file_info_; std::shared_ptr bad_node_tracker_; bool CheckSeekBounds(ssize_t desired_position); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h index 935e7c96c7b02..7fdb6a1e3f2cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h @@ -23,6 +23,7 @@ #include "hdfspp/hdfspp.h" #include "reader/fileinfo.h" +#include #include namespace hdfs { @@ -217,7 +218,7 @@ class FileSystemImpl : public FileSystem { **/ std::shared_ptr io_service_; const Options options_; - const std::string client_name_; + const std::shared_ptr client_name_; std::string cluster_name_; NameNodeOperations nn_; std::shared_ptr bad_node_tracker_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h index 3470a48b3c710..445aa08653dcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h @@ -26,6 +26,8 @@ #include "ClientNamenodeProtocol.pb.h" #include "ClientNamenodeProtocol.hrpc.inl" +#include +#include namespace hdfs { @@ -43,7 +45,7 @@ class NameNodeOperations { public: MEMCHECKED_CLASS(NameNodeOperations) NameNodeOperations(std::shared_ptr io_service, const Options &options, - const std::string &client_name, const std::string 
&user_name, + const std::shared_ptr &client_name, const std::string &user_name, const char *protocol_name, int protocol_version) : io_service_(io_service), engine_(std::make_shared(io_service, options, client_name, user_name, protocol_name, protocol_version)), diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc index 82fdfeb033d38..a5de92e61bf3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc @@ -306,13 +306,19 @@ std::shared_ptr RpcConnection::PrepareContextPacket() { return std::make_shared(); } + const auto& client_name = pinnedEngine->client_name(); + if (client_name == nullptr) { + LOG_ERROR(kRPC, << "RpcConnection@" << this << " unable to generate random client name"); + return std::make_shared(); + } + std::shared_ptr serializedPacketBuffer = std::make_shared(); RpcRequestHeaderProto headerProto; headerProto.set_rpckind(RPC_PROTOCOL_BUFFER); headerProto.set_rpcop(RpcRequestHeaderProto::RPC_FINAL_PACKET); headerProto.set_callid(RpcEngine::kCallIdConnectionContext); - headerProto.set_clientid(pinnedEngine->client_name()); + headerProto.set_clientid(*client_name); IpcConnectionContextProto handshakeContextProto; handshakeContextProto.set_protocol(pinnedEngine->protocol_name()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc index 065dffad96baa..e3274cb88aacf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -36,7 +37,7 @@ using optional = std::experimental::optional; RpcEngine::RpcEngine(std::shared_ptr io_service, const Options &options, - const std::string &client_name, const std::string &user_name, + const std::shared_ptr &client_name, const std::string &user_name, const char *protocol_name, int protocol_version) : io_service_(io_service), options_(options), diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h index 3bf7dcabda063..1445a1860de77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.h @@ -38,6 +38,7 @@ #include #include #include +#include namespace hdfs { @@ -79,7 +80,7 @@ class LockFreeRpcEngine { virtual const RetryPolicy *retry_policy() = 0; virtual int NextCallId() = 0; - virtual const std::string &client_name() = 0; + virtual const std::shared_ptr &client_name() = 0; virtual const std::unique_ptr &client_id() = 0; virtual const std::string &user_name() = 0; virtual const std::string &protocol_name() = 0; @@ -109,7 +110,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< }; RpcEngine(std::shared_ptr service, const Options &options, - const std::string &client_name, const std::string &user_name, + const std::shared_ptr 
&client_name, const std::string &user_name, const char *protocol_name, int protocol_version); void Connect(const std::string & cluster_name, @@ -141,7 +142,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< void TEST_SetRetryPolicy(std::unique_ptr policy); std::unique_ptr TEST_GenerateRetryPolicyUsingOptions(); - const std::string &client_name() override { return client_name_; } + const std::shared_ptr &client_name() override { return client_name_; } const std::unique_ptr &client_id() override { return client_id_; } const std::string &user_name() override { return auth_info_.getUser(); } const std::string &protocol_name() override { return protocol_name_; } @@ -165,7 +166,7 @@ class RpcEngine : public LockFreeRpcEngine, public std::enable_shared_from_this< private: mutable std::shared_ptr io_service_; const Options options_; - const std::string client_name_; + const std::shared_ptr client_name_; const std::unique_ptr client_id_; const std::string protocol_name_; const int protocol_version_; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc index 5417af8f4cf11..911f7cae28a90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc @@ -23,11 +23,16 @@ #include "fs/bad_datanode_tracker.h" #include "reader/block_reader.h" +#include #include +#include +#include #include #include + + using hadoop::common::TokenProto; using hadoop::hdfs::DatanodeInfoProto; using hadoop::hdfs::DatanodeIDProto; @@ -139,7 +144,10 @@ TEST(BadDataNodeTest, TestNoNodes) { auto monitors = std::make_shared(); bad_node_tracker->AddBadNode("foo"); - PartialMockFileHandle is("cluster", "file", io_service, GetRandomClientName(), file_info, bad_node_tracker, monitors); + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); + + PartialMockFileHandle is("cluster", "file", io_service, client_name, file_info, bad_node_tracker, monitors); Status stat; size_t read = 0; @@ -195,7 +203,11 @@ TEST(BadDataNodeTest, NNEventCallback) { return event_response::make_ok(); }); - PartialMockFileHandle is("cluster", "file", io_service, GetRandomClientName(), file_info, tracker, monitors); + + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); + + PartialMockFileHandle is("cluster", "file", io_service, client_name, file_info, tracker, monitors); Status stat; size_t read = 0; @@ -241,7 +253,11 @@ TEST(BadDataNodeTest, RecoverableError) { std::shared_ptr io_service = IoService::MakeShared(); auto tracker = std::make_shared(); auto monitors = std::make_shared(); - PartialMockFileHandle is("cluster", "file", io_service, GetRandomClientName(), file_info, tracker, monitors); + + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); + + PartialMockFileHandle is("cluster", "file", io_service, client_name, file_info, tracker, monitors); Status stat; size_t read = 0; EXPECT_CALL(*is.mock_reader_, AsyncReadBlock(_,_,_,_,_)) @@ -292,7 +308,11 @@ TEST(BadDataNodeTest, InternalError) { std::shared_ptr io_service = IoService::MakeShared(); auto tracker = std::make_shared(); auto monitors = std::make_shared(); - PartialMockFileHandle is("cluster", "file", io_service, GetRandomClientName(), file_info, tracker, monitors); 
+ + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); + + PartialMockFileHandle is("cluster", "file", io_service, client_name, file_info, tracker, monitors); Status stat; size_t read = 0; EXPECT_CALL(*is.mock_reader_, AsyncReadBlock(_,_,_,_,_)) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc index dfee686b60244..4843da917865f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc @@ -27,8 +27,10 @@ #include #include -#include #include +#include +#include +#include #include #include #include @@ -165,8 +167,10 @@ TEST(RemoteBlockReaderTest, TestReadSingleTrunk) { EXPECT_CALL(reader, AsyncReadPacket(_, _)) .WillOnce(InvokeArgument<1>(Status::OK(), sizeof(buf))); + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), + *client_name, block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -192,8 +196,10 @@ TEST(RemoteBlockReaderTest, TestReadMultipleTrunk) { .Times(4) .WillRepeatedly(InvokeArgument<1>(Status::OK(), sizeof(buf) / 4)); + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), + *client_name, block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; @@ -220,8 +226,10 @@ TEST(RemoteBlockReaderTest, TestReadError) { .WillOnce(InvokeArgument<1>(Status::OK(), sizeof(buf) / 4)) .WillOnce(InvokeArgument<1>(Status::Error("error"), 0)); + const auto client_name = GetRandomClientName(); + ASSERT_NE(client_name, nullptr); reader.AsyncReadBlock( - GetRandomClientName(), block, 0, boost::asio::buffer(buf, sizeof(buf)), + *client_name, block, 0, boost::asio::buffer(buf, sizeof(buf)), [&stat, &read](const Status &status, size_t transferred) { stat = status; read = transferred; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc index 744e7eba16d8e..931f873d6deb6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc @@ -24,8 +24,14 @@ #include "rpc/rpc_connection_impl.h" #include "common/namenode_info.h" +#include +#include + #include +#include #include +#include +#include #include #include @@ -108,7 +114,9 @@ TEST(RpcEngineTest, TestRoundTrip) { std::shared_ptr io_service = IoService::MakeShared(); Options options; - std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); auto conn = std::make_shared >(engine); conn->TEST_set_connected(true); @@ -144,7 +152,9 @@ TEST(RpcEngineTest, TestRoundTrip) { TEST(RpcEngineTest, TestConnectionResetAndFail) { 
std::shared_ptr io_service = IoService::MakeShared(); Options options; - std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); auto conn = std::make_shared >(engine); conn->TEST_set_connected(true); @@ -181,8 +191,9 @@ TEST(RpcEngineTest, TestConnectionResetAndRecover) { Options options; options.max_rpc_retries = 1; options.rpc_retry_delay_ms = 0; - std::shared_ptr engine - = std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); // Normally determined during RpcEngine::Connect, but in this case options // provides enough info to determine policy here. @@ -222,8 +233,9 @@ TEST(RpcEngineTest, TestConnectionResetAndRecoverWithDelay) { Options options; options.max_rpc_retries = 1; options.rpc_retry_delay_ms = 1; - std::shared_ptr engine = - std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); // Normally determined during RpcEngine::Connect, but in this case options // provides enough info to determine policy here. @@ -276,8 +288,10 @@ TEST(RpcEngineTest, TestConnectionFailure) Options options; options.max_rpc_retries = 0; options.rpc_retry_delay_ms = 0; - std::shared_ptr engine - = std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); + EXPECT_CALL(*producer, Produce()) .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))); @@ -303,8 +317,9 @@ TEST(RpcEngineTest, TestConnectionFailureRetryAndFailure) Options options; options.max_rpc_retries = 2; options.rpc_retry_delay_ms = 0; - std::shared_ptr engine = - std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) @@ -332,8 +347,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndRecover) Options options; options.max_rpc_retries = 1; options.rpc_retry_delay_ms = 0; - std::shared_ptr engine = - std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) @@ -355,8 +371,9 @@ TEST(RpcEngineTest, TestEventCallbacks) Options options; options.max_rpc_retries = 99; options.rpc_retry_delay_ms = 0; - std::shared_ptr engine = - std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); // Normally determined during RpcEngine::Connect, but in this case options // provides enough info to determine policy here. 
@@ -441,8 +458,9 @@ TEST(RpcEngineTest, TestConnectionFailureAndAsyncRecover) Options options; options.max_rpc_retries = 1; options.rpc_retry_delay_ms = 1; - std::shared_ptr engine = - std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); EXPECT_CALL(*producer, Produce()) .WillOnce(Return(std::make_pair(make_error_code(boost::asio::error::connection_reset), ""))) .WillOnce(Return(std::make_pair(boost::system::error_code(), ""))) @@ -466,7 +484,9 @@ TEST(RpcEngineTest, TestTimeout) { std::shared_ptr io_service = IoService::MakeShared(); Options options; options.rpc_timeout = 1; - std::shared_ptr engine = std::make_shared(io_service, options, "foo", "", "protocol", 1); + auto engine = std::make_shared( + io_service, options, std::make_shared("foo"), "", "protocol", + 1); auto conn = std::make_shared >(engine); conn->TEST_set_connected(true); From 5ef5783204537d648391d8ce5e46988f1b3c3846 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Wed, 31 Mar 2021 08:25:20 +0530 Subject: [PATCH 167/666] YARN-10544. AMParams.java having un-necessary access identifier static final. Contributed by ANANDA G B. --- .../mapreduce/v2/app/webapp/AMParams.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java index 2ca7ff5471754..4bbd1da7351e3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java @@ -22,13 +22,13 @@ * Params constants for the AM webapp and the history webapp. */ public interface AMParams { - static final String RM_WEB = "rm.web"; - static final String APP_ID = "app.id"; - static final String JOB_ID = "job.id"; - static final String TASK_ID = "task.id"; - static final String TASK_TYPE = "task.type"; - static final String TASK_STATE = "task.state"; - static final String ATTEMPT_STATE = "attempt.state"; - static final String COUNTER_GROUP = "counter.group"; - static final String COUNTER_NAME = "counter.name"; + String RM_WEB = "rm.web"; + String APP_ID = "app.id"; + String JOB_ID = "job.id"; + String TASK_ID = "task.id"; + String TASK_TYPE = "task.type"; + String TASK_STATE = "task.state"; + String ATTEMPT_STATE = "attempt.state"; + String COUNTER_GROUP = "counter.group"; + String COUNTER_NAME = "counter.name"; } From c9799cb0924ecaf959e7b7dc16a6e0124b295373 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Wed, 31 Mar 2021 12:22:53 +0800 Subject: [PATCH 168/666] HDFS-15932. Improve the balancer error message when process exits abnormally. Contributed by Renukaprasad C. 
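The YARN-10544 change above is purely cosmetic: every field declared in a Java interface is implicitly public, static and final, so the explicit modifiers on the AMParams constants were redundant. A small stand-alone check (hypothetical class name) demonstrates this:

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public class InterfaceConstantCheck {
  // A constant declared without modifiers, like the fields in AMParams.
  interface Params {
    String RM_WEB = "rm.web";
  }

  public static void main(String[] args) throws Exception {
    Field f = Params.class.getDeclaredField("RM_WEB");
    int m = f.getModifiers();
    // Prints "true true true": the field is implicitly public, static and final.
    System.out.println(Modifier.isPublic(m) + " "
        + Modifier.isStatic(m) + " " + Modifier.isFinal(m));
  }
}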
--- .../hadoop-common/src/main/bin/hadoop-functions.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index 4be554aef6c25..c4c3157b9d82b 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -2206,7 +2206,7 @@ function hadoop_daemon_handler hadoop_verify_logdir hadoop_status_daemon "${daemon_pidfile}" if [[ $? == 0 ]]; then - hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first." + hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first and ensure ${daemon_pidfile} file is empty before retry." exit 1 else # stale pid file, so just remove it and continue on @@ -2267,7 +2267,7 @@ function hadoop_secure_daemon_handler hadoop_verify_logdir hadoop_status_daemon "${daemon_pidfile}" if [[ $? == 0 ]]; then - hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first." + hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first and ensure ${daemon_pidfile} file is empty before retry." exit 1 else # stale pid file, so just remove it and continue on From 789748134dcccfe41f14e8ba642d8df4b6895038 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Wed, 31 Mar 2021 13:00:13 +0800 Subject: [PATCH 169/666] HDFS-15863. RBF: Validation message to be corrected in FairnessPolicyController. Contributed by Renukaprasad C. --- ...aticRouterRpcFairnessPolicyController.java | 35 ++++++++++++------- ...TestRouterRpcFairnessPolicyController.java | 18 ++++++---- 2 files changed, 34 insertions(+), 19 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/fairness/StaticRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/fairness/StaticRouterRpcFairnessPolicyController.java index 49a90758298f0..b4e3dc36b0e37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/fairness/StaticRouterRpcFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/fairness/StaticRouterRpcFairnessPolicyController.java @@ -42,6 +42,10 @@ public class StaticRouterRpcFairnessPolicyController extends private static final Logger LOG = LoggerFactory.getLogger(StaticRouterRpcFairnessPolicyController.class); + public static final String ERROR_MSG = "Configured handlers " + + DFS_ROUTER_HANDLER_COUNT_KEY + '=' + + " %d is less than the minimum required handlers %d"; + public StaticRouterRpcFairnessPolicyController(Configuration conf) { init(conf); } @@ -65,15 +69,13 @@ public void init(Configuration conf) // Insert the concurrent nameservice into the set to process together allConfiguredNS.add(CONCURRENT_NS); + validateHandlersCount(conf, handlerCount, allConfiguredNS); for (String nsId : allConfiguredNS) { int dedicatedHandlers = conf.getInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + nsId, 0); LOG.info("Dedicated handlers {} for ns {} ", dedicatedHandlers, nsId); if (dedicatedHandlers > 0) { handlerCount -= dedicatedHandlers; - // Total handlers should not be less than sum of dedicated - // handlers. 
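The HDFS-15863 patch below moves the handler-count check to a single up-front validation: it sums the dedicated handlers configured per nameservice, counting at least one for every nameservice with none dedicated, and rejects the configuration when the total exceeds the available handler count, formatting the message with String.format. A simplified sketch of that logic, with hypothetical names and without the Configuration plumbing:

import java.util.HashMap;
import java.util.Map;

public class HandlerCountValidation {
  // Simplified version of the ERROR_MSG format used in the patch below.
  static final String ERROR_MSG =
      "Configured handlers %d is less than the minimum required handlers %d";

  // dedicatedHandlers maps nameservice id -> dedicated handler count (0 if unset).
  static void validate(int handlerCount, Map<String, Integer> dedicatedHandlers) {
    int totalDedicatedHandlers = 0;
    for (int dedicated : dedicatedHandlers.values()) {
      // A nameservice with no dedicated handlers still needs at least one.
      totalDedicatedHandlers += dedicated > 0 ? dedicated : 1;
    }
    if (totalDedicatedHandlers > handlerCount) {
      throw new IllegalArgumentException(
          String.format(ERROR_MSG, handlerCount, totalDedicatedHandlers));
    }
  }

  public static void main(String[] args) {
    Map<String, Integer> ns = new HashMap<>();
    ns.put("ns1", 2);
    ns.put("concurrent", 0);
    try {
      validate(1, ns);  // requires 3 handlers in total, only 1 configured
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}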
- validateCount(nsId, handlerCount, 0); insertNameServiceWithPermits(nsId, dedicatedHandlers); logAssignment(nsId, dedicatedHandlers); } else { @@ -88,8 +90,6 @@ public void init(Configuration conf) int handlersPerNS = handlerCount / unassignedNS.size(); LOG.info("Handlers available per ns {}", handlersPerNS); for (String nsId : unassignedNS) { - // Each NS should have at least one handler assigned. - validateCount(nsId, handlersPerNS, 1); insertNameServiceWithPermits(nsId, handlersPerNS); logAssignment(nsId, handlersPerNS); } @@ -112,15 +112,26 @@ private static void logAssignment(String nsId, int count) { count, nsId); } - private static void validateCount(String nsId, int handlers, int min) throws - IllegalArgumentException { - if (handlers < min) { - String msg = - "Available handlers " + handlers + - " lower than min " + min + - " for nsId " + nsId; + private void validateHandlersCount(Configuration conf, int handlerCount, + Set allConfiguredNS) { + int totalDedicatedHandlers = 0; + for (String nsId : allConfiguredNS) { + int dedicatedHandlers = + conf.getInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + nsId, 0); + if (dedicatedHandlers > 0) { + // Total handlers should not be less than sum of dedicated handlers. + totalDedicatedHandlers += dedicatedHandlers; + } else { + // Each NS should have at least one handler assigned. + totalDedicatedHandlers++; + } + } + if (totalDedicatedHandlers > handlerCount) { + String msg = String.format(ERROR_MSG, handlerCount, + totalDedicatedHandlers); LOG.error(msg); throw new IllegalArgumentException(msg); } } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java index c0c30747d8388..8e816643ac5bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java @@ -85,30 +85,31 @@ public void testHandlerAllocationPreconfigured() { @Test public void testAllocationErrorWithZeroHandlers() { Configuration conf = createConf(0); - verifyInstantiationError(conf); + verifyInstantiationError(conf, 0, 3); } @Test public void testAllocationErrorForLowDefaultHandlers() { Configuration conf = createConf(1); - verifyInstantiationError(conf); + verifyInstantiationError(conf, 1, 3); } @Test public void testAllocationErrorForLowDefaultHandlersPerNS() { Configuration conf = createConf(1); conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "concurrent", 1); - verifyInstantiationError(conf); + verifyInstantiationError(conf, 1, 3); } @Test public void testAllocationErrorForLowPreconfiguredHandlers() { Configuration conf = createConf(1); conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns1", 2); - verifyInstantiationError(conf); + verifyInstantiationError(conf, 1, 4); } - private void verifyInstantiationError(Configuration conf) { + private void verifyInstantiationError(Configuration conf, int handlerCount, + int totalDedicatedHandlers) { GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer .captureLogs(LoggerFactory.getLogger( StaticRouterRpcFairnessPolicyController.class)); @@ -117,8 +118,11 @@ private void verifyInstantiationError(Configuration conf) { } catch 
(IllegalArgumentException e) { // Ignore the exception as it is expected here. } - assertTrue("Should contain error message", - logs.getOutput().contains("lower than min")); + String errorMsg = String.format( + StaticRouterRpcFairnessPolicyController.ERROR_MSG, handlerCount, + totalDedicatedHandlers); + assertTrue("Should contain error message: " + errorMsg, + logs.getOutput().contains(errorMsg)); } private RouterRpcFairnessPolicyController getFairnessPolicyController( From bec2bce69f1cce6085e4d82d2b38a41ec5a52e14 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Wed, 31 Mar 2021 10:55:14 +0200 Subject: [PATCH 170/666] YARN-10718. Fix CapacityScheduler#initScheduler log error. Contributed by Qi Zhu. --- .../scheduler/capacity/CapacityScheduler.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 70738c0a5a794..37acb608c8a19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -393,9 +393,9 @@ void initScheduler(Configuration configuration) throws } LOG.info("Initialized CapacityScheduler with " + "calculator=" - + getResourceCalculator().getClass() + ", " + "minimumAllocation=<" - + getMinimumResourceCapability() + ">, " + "maximumAllocation=<" - + getMaximumResourceCapability() + ">, " + "asynchronousScheduling=" + + getResourceCalculator().getClass() + ", " + "minimumAllocation=" + + getMinimumResourceCapability() + ", " + "maximumAllocation=" + + getMaximumResourceCapability() + ", " + "asynchronousScheduling=" + scheduleAsynchronously + ", " + "asyncScheduleInterval=" + asyncScheduleInterval + "ms" + ",multiNodePlacementEnabled=" + multiNodePlacementEnabled); From a4f9a3a15c2911533abfae4d5873101dbc05e1e1 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 31 Mar 2021 16:14:03 +0200 Subject: [PATCH 171/666] YARN-10597. CSMappingPlacementRule should not create new instance of Groups. 
Contributed by Gergely Pollak --- .../resourcemanager/placement/CSMappingPlacementRule.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java index 07aeb1297c6d1..894bc8262221a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java @@ -132,11 +132,7 @@ public boolean initialize(ResourceScheduler scheduler) throws IOException { overrideWithQueueMappings = conf.getOverrideWithQueueMappings(); if (groups == null) { - //We cannot use Groups#getUserToGroupsMappingService here, because when - //tests change the HADOOP_SECURITY_GROUP_MAPPING, Groups won't refresh its - //cached instance of groups, so we might get a Group instance which - //ignores the HADOOP_SECURITY_GROUP_MAPPING settings. - groups = new Groups(conf); + groups = Groups.getUserToGroupsMappingService(conf); } MappingRuleValidationContext validationContext = buildValidationContext(); From b1ff11ade9014fd4e1fcd2ec3df127ff31551614 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 1 Apr 2021 01:57:31 +0900 Subject: [PATCH 172/666] HADOOP-17608. Fix NPE in TestKMS (#2828) --- .../hadoop/crypto/key/kms/server/TestKMS.java | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index 3d59e6f5be7b7..dbe685b6a3d8b 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -18,6 +18,8 @@ package org.apache.hadoop.crypto.key.kms.server; import java.util.function.Supplier; + +import org.apache.commons.lang3.ThreadUtils; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; @@ -525,6 +527,7 @@ public void testStartStop(final boolean ssl, final boolean kerberos) if (ssl) { sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); try { + // the first reloader thread is created here sslFactory.init(); } catch (GeneralSecurityException ex) { throw new IOException(ex); @@ -541,31 +544,29 @@ public Void call() throws Exception { final URI uri = createKMSUri(getKMSUrl()); if (ssl) { + // the second reloader thread is created here KeyProvider testKp = createProvider(uri, conf); - ThreadGroup threadGroup = Thread.currentThread().getThreadGroup(); - while (threadGroup.getParent() != null) { - threadGroup = threadGroup.getParent(); - } - Thread[] threads = new Thread[threadGroup.activeCount()]; - threadGroup.enumerate(threads); - Thread reloaderThread = null; - for (Thread thread : threads) { - if ((thread.getName() != null) - && 
(thread.getName().contains(SSL_RELOADER_THREAD_NAME))) { - reloaderThread = thread; - } - } - Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive()); - // Explicitly close the provider so we can verify the internal thread - // is shutdown + Collection reloaderThreads = + ThreadUtils.findThreadsByName(SSL_RELOADER_THREAD_NAME); + // now there are two active reloader threads + assertEquals(2, reloaderThreads.size()); + // Explicitly close the provider so we can verify + // the second reloader thread is shutdown testKp.close(); boolean reloaderStillAlive = true; for (int i = 0; i < 10; i++) { - reloaderStillAlive = reloaderThread.isAlive(); - if (!reloaderStillAlive) break; + for (Thread thread : reloaderThreads) { + if (!thread.isAlive()) { + reloaderStillAlive = false; + break; + } + } Thread.sleep(1000); } Assert.assertFalse("Reloader is still alive", reloaderStillAlive); + reloaderThreads = + ThreadUtils.findThreadsByName(SSL_RELOADER_THREAD_NAME); + assertEquals(1, reloaderThreads.size()); } if (kerberos) { From 6a06e659551fd7be8492ee248d37bca5bf353cc8 Mon Sep 17 00:00:00 2001 From: Borislav Iordanov Date: Wed, 31 Mar 2021 13:07:42 -0400 Subject: [PATCH 173/666] Hadoop 16524 - resubmission following some unit test fixes (#2693) Signed-off-by: stack --- .../org/apache/hadoop/http/HttpServer2.java | 62 ++++- .../ssl/FileBasedKeyStoresFactory.java | 222 +++++++++++------- .../security/ssl/FileMonitoringTimerTask.java | 85 +++++++ .../ssl/ReloadingX509KeystoreManager.java | 157 +++++++++++++ .../ssl/ReloadingX509TrustManager.java | 95 ++------ .../ssl/TestReloadingX509KeyManager.java | 205 ++++++++++++++++ .../ssl/TestReloadingX509TrustManager.java | 77 +++--- .../hdfs/web/TestURLConnectionFactory.java | 3 +- .../client/api/impl/TestTimelineClient.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 3 +- 10 files changed, 715 insertions(+), 197 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 7534cba45ee13..dde27dbec497a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,14 +27,17 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; -import java.util.Arrays; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; +import java.nio.file.Paths; import java.util.List; +import java.util.ArrayList; import java.util.Map; +import java.util.HashMap; +import java.util.Collections; +import java.util.Optional; import java.util.Properties; +import java.util.Enumeration; +import java.util.Arrays; +import java.util.Timer; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -75,6 +78,8 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import 
org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; +import org.apache.hadoop.security.ssl.FileMonitoringTimerTask; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; @@ -186,6 +191,7 @@ public final class HttpServer2 implements FilterContainer { static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final SignerSecretProvider secretProvider; + private final Optional configurationChangeMonitor; private XFrameOption xFrameOption; private boolean xFrameOptionIsEnabled; public static final String HTTP_HEADER_PREFIX = "hadoop.http.header."; @@ -244,6 +250,8 @@ public static class Builder { private boolean sniHostCheckEnabled; + private Optional configurationChangeMonitor = Optional.empty(); + public Builder setName(String name){ this.name = name; return this; @@ -574,12 +582,45 @@ private ServerConnector createHttpsChannelConnector( } setEnabledProtocols(sslContextFactory); + + long storesReloadInterval = + conf.getLong(FileBasedKeyStoresFactory.SSL_STORES_RELOAD_INTERVAL_TPL_KEY, + FileBasedKeyStoresFactory.DEFAULT_SSL_STORES_RELOAD_INTERVAL); + + if (storesReloadInterval > 0) { + this.configurationChangeMonitor = Optional.of( + this.makeConfigurationChangeMonitor(storesReloadInterval, sslContextFactory)); + } + conn.addFirstConnectionFactory(new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString())); return conn; } + private Timer makeConfigurationChangeMonitor(long reloadInterval, + SslContextFactory.Server sslContextFactory) { + java.util.Timer timer = new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + // + // The Jetty SSLContextFactory provides a 'reload' method which will reload both + // truststore and keystore certificates. 
+ // + timer.schedule(new FileMonitoringTimerTask( + Paths.get(keyStore), + path -> { + LOG.info("Reloading certificates from store keystore " + keyStore); + try { + sslContextFactory.reload(factory -> { }); + } catch (Exception ex) { + LOG.error("Failed to reload SSL keystore certificates", ex); + } + },null), + reloadInterval, + reloadInterval + ); + return timer; + } + private void setEnabledProtocols(SslContextFactory sslContextFactory) { String enabledProtocols = conf.get(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, SSLFactory.SSL_ENABLED_PROTOCOLS_DEFAULT); @@ -622,6 +663,7 @@ private HttpServer2(final Builder b) throws IOException { this.webAppContext = createWebAppContext(b, adminsAcl, appDir); this.xFrameOptionIsEnabled = b.xFrameEnabled; this.xFrameOption = b.xFrameOption; + this.configurationChangeMonitor = b.configurationChangeMonitor; try { this.secretProvider = @@ -1420,6 +1462,16 @@ void openListeners() throws Exception { */ public void stop() throws Exception { MultiException exception = null; + if (this.configurationChangeMonitor.isPresent()) { + try { + this.configurationChangeMonitor.get().cancel(); + } catch (Exception e) { + LOG.error( + "Error while canceling configuration monitoring timer for webapp" + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + } for (ServerConnector c : listeners) { try { c.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index b184e4a152b8b..236d881581dac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -29,20 +29,20 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManager; import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.text.MessageFormat; +import java.util.Timer; /** * {@link KeyStoresFactory} implementation that reads the certificates from * keystore files. *

- * if the trust certificates keystore file changes, the {@link TrustManager} - * is refreshed with the new trust certificate entries (using a - * {@link ReloadingX509TrustManager} trustmanager). + * If either the truststore or the keystore certificates file changes, it + * would be refreshed under the corresponding wrapper implementation - + * {@link ReloadingX509KeystoreManager} or {@link ReloadingX509TrustManager}. + *

*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -51,6 +51,19 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { private static final Logger LOG = LoggerFactory.getLogger(FileBasedKeyStoresFactory.class); + + /** + * The name of the timer thread monitoring file changes. + */ + public static final String SSL_MONITORING_THREAD_NAME = "SSL Certificates Store Monitor"; + + /** + * The refresh interval used to check if either of the truststore or keystore + * certificate file has changed. + */ + public static final String SSL_STORES_RELOAD_INTERVAL_TPL_KEY = + "ssl.{0}.stores.reload.interval"; + public static final String SSL_KEYSTORE_LOCATION_TPL_KEY = "ssl.{0}.keystore.location"; public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY = @@ -77,14 +90,119 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { public static final String DEFAULT_KEYSTORE_TYPE = "jks"; /** - * Reload interval in milliseconds. + * The default time interval in milliseconds used to check if either + * of the truststore or keystore certificates file has changed and needs reloading. */ - public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000; + public static final int DEFAULT_SSL_STORES_RELOAD_INTERVAL = 10000; private Configuration conf; private KeyManager[] keyManagers; private TrustManager[] trustManagers; private ReloadingX509TrustManager trustManager; + private Timer fileMonitoringTimer; + + + private void createTrustManagersFromConfiguration(SSLFactory.Mode mode, + String truststoreType, + String truststoreLocation, + long storesReloadInterval) + throws IOException, GeneralSecurityException { + String passwordProperty = resolvePropertyName(mode, + SSL_TRUSTSTORE_PASSWORD_TPL_KEY); + String truststorePassword = getPassword(conf, passwordProperty, ""); + if (truststorePassword.isEmpty()) { + // An empty trust store password is legal; the trust store password + // is only required when writing to a trust store. Otherwise it's + // an optional integrity check. + truststorePassword = null; + } + + // Check if obsolete truststore specific reload interval is present for backward compatible + long truststoreReloadInterval = + conf.getLong( + resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), + storesReloadInterval); + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation + + ", reloading at " + truststoreReloadInterval + " millis."); + } + + trustManager = new ReloadingX509TrustManager( + truststoreType, + truststoreLocation, + truststorePassword); + + if (truststoreReloadInterval > 0) { + fileMonitoringTimer.schedule( + new FileMonitoringTimerTask( + Paths.get(truststoreLocation), + path -> trustManager.loadFrom(path), + exception -> LOG.error(ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE, exception)), + truststoreReloadInterval, + truststoreReloadInterval); + } + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); + } + trustManagers = new TrustManager[]{trustManager}; + } + + /** + * Implements logic of initializing the KeyManagers with the options + * to reload keystores. + * @param mode client or server + * @param keystoreType The keystore type. + * @param storesReloadInterval The interval to check if the keystore certificates + * file has changed. 
+ */ + private void createKeyManagersFromConfiguration(SSLFactory.Mode mode, + String keystoreType, long storesReloadInterval) + throws GeneralSecurityException, IOException { + String locationProperty = + resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); + String keystoreLocation = conf.get(locationProperty, ""); + if (keystoreLocation.isEmpty()) { + throw new GeneralSecurityException("The property '" + locationProperty + + "' has not been set in the ssl configuration file."); + } + String passwordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); + String keystorePassword = getPassword(conf, passwordProperty, ""); + if (keystorePassword.isEmpty()) { + throw new GeneralSecurityException("The property '" + passwordProperty + + "' has not been set in the ssl configuration file."); + } + String keyPasswordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); + // Key password defaults to the same value as store password for + // compatibility with legacy configurations that did not use a separate + // configuration property for key password. + String keystoreKeyPassword = getPassword( + conf, keyPasswordProperty, keystorePassword); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); + } + + ReloadingX509KeystoreManager keystoreManager = new ReloadingX509KeystoreManager( + keystoreType, + keystoreLocation, + keystorePassword, + keystoreKeyPassword); + + if (storesReloadInterval > 0) { + fileMonitoringTimer.schedule( + new FileMonitoringTimerTask( + Paths.get(keystoreLocation), + path -> keystoreManager.loadFrom(path), + exception -> LOG.error(ReloadingX509KeystoreManager.RELOAD_ERROR_MESSAGE, exception)), + storesReloadInterval, + storesReloadInterval); + } + + keyManagers = new KeyManager[] { keystoreManager }; + } /** * Resolves a property name to its client/server version if applicable. @@ -139,56 +257,28 @@ public void init(SSLFactory.Mode mode) conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, SSLFactory.SSL_REQUIRE_CLIENT_CERT_DEFAULT); + long storesReloadInterval = conf.getLong( + resolvePropertyName(mode, SSL_STORES_RELOAD_INTERVAL_TPL_KEY), + DEFAULT_SSL_STORES_RELOAD_INTERVAL); + + fileMonitoringTimer = new Timer(SSL_MONITORING_THREAD_NAME, true); + // certificate store String keystoreType = - conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), - DEFAULT_KEYSTORE_TYPE); - KeyStore keystore = KeyStore.getInstance(keystoreType); - String keystoreKeyPassword = null; - if (requireClientCert || mode == SSLFactory.Mode.SERVER) { - String locationProperty = - resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); - String keystoreLocation = conf.get(locationProperty, ""); - if (keystoreLocation.isEmpty()) { - throw new GeneralSecurityException("The property '" + locationProperty + - "' has not been set in the ssl configuration file."); - } - String passwordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); - String keystorePassword = getPassword(conf, passwordProperty, ""); - if (keystorePassword.isEmpty()) { - throw new GeneralSecurityException("The property '" + passwordProperty + - "' has not been set in the ssl configuration file."); - } - String keyPasswordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); - // Key password defaults to the same value as store password for - // compatibility with legacy configurations that did not use a separate - // configuration property for key password. 
- keystoreKeyPassword = getPassword( - conf, keyPasswordProperty, keystorePassword); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); - } + conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), + DEFAULT_KEYSTORE_TYPE); - InputStream is = Files.newInputStream(Paths.get(keystoreLocation)); - try { - keystore.load(is, keystorePassword.toCharArray()); - } finally { - is.close(); - } - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation); - } + if (requireClientCert || mode == SSLFactory.Mode.SERVER) { + createKeyManagersFromConfiguration(mode, keystoreType, storesReloadInterval); } else { + KeyStore keystore = KeyStore.getInstance(keystoreType); keystore.load(null, null); + KeyManagerFactory keyMgrFactory = KeyManagerFactory + .getInstance(SSLFactory.SSLCERTIFICATE); + + keyMgrFactory.init(keystore, null); + keyManagers = keyMgrFactory.getKeyManagers(); } - KeyManagerFactory keyMgrFactory = KeyManagerFactory - .getInstance(SSLFactory.SSLCERTIFICATE); - - keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ? - keystoreKeyPassword.toCharArray() : null); - keyManagers = keyMgrFactory.getKeyManagers(); //trust store String truststoreType = @@ -199,33 +289,7 @@ public void init(SSLFactory.Mode mode) resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY); String truststoreLocation = conf.get(locationProperty, ""); if (!truststoreLocation.isEmpty()) { - String passwordProperty = resolvePropertyName(mode, - SSL_TRUSTSTORE_PASSWORD_TPL_KEY); - String truststorePassword = getPassword(conf, passwordProperty, ""); - if (truststorePassword.isEmpty()) { - // An empty trust store password is legal; the trust store password - // is only required when writing to a trust store. Otherwise it's - // an optional integrity check. - truststorePassword = null; - } - long truststoreReloadInterval = - conf.getLong( - resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), - DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL); - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation); - } - - trustManager = new ReloadingX509TrustManager(truststoreType, - truststoreLocation, - truststorePassword, - truststoreReloadInterval); - trustManager.init(); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); - } - trustManagers = new TrustManager[]{trustManager}; + createTrustManagersFromConfiguration(mode, truststoreType, truststoreLocation, storesReloadInterval); } else { if (LOG.isDebugEnabled()) { LOG.debug("The property '" + locationProperty + "' has not been set, " + @@ -256,7 +320,7 @@ String getPassword(Configuration conf, String alias, String defaultPass) { @Override public synchronized void destroy() { if (trustManager != null) { - trustManager.destroy(); + fileMonitoringTimer.cancel(); trustManager = null; keyManagers = null; trustManagers = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java new file mode 100644 index 0000000000000..40b61978ef1f4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.file.Path; +import java.util.TimerTask; +import java.util.function.Consumer; + +/** + * Implements basic logic to track when a file changes on disk and call the action + * passed to the constructor when it does. An exception handler can optionally also be specified + * in the constructor, otherwise any exception occurring during process will be logged + * using this class' logger. + */ +@InterfaceAudience.Private +public class FileMonitoringTimerTask extends TimerTask { + + static final Logger LOG = LoggerFactory.getLogger(FileMonitoringTimerTask.class); + + @VisibleForTesting + static final String PROCESS_ERROR_MESSAGE = + "Could not process file change : "; + + final private Path filePath; + final private Consumer onFileChange; + final Consumer onChangeFailure; + private long lastProcessed; + + /** + * Create file monitoring task to be scheduled using a standard Java {@link java.util.Timer} + * instance. + * + * @param filePath The path to the file to monitor. + * @param onFileChange The function to call when the file has changed. + * @param onChangeFailure The function to call when an exception is thrown during the + * file change processing. 
+ */ + public FileMonitoringTimerTask(Path filePath, Consumer onFileChange, + Consumer onChangeFailure) { + Preconditions.checkNotNull(filePath, "path to monitor disk file is not set"); + Preconditions.checkNotNull(onFileChange, "action to monitor disk file is not set"); + + this.filePath = filePath; + this.lastProcessed = filePath.toFile().lastModified(); + this.onFileChange = onFileChange; + this.onChangeFailure = onChangeFailure; + } + + @Override + public void run() { + if (lastProcessed != filePath.toFile().lastModified()) { + try { + onFileChange.accept(filePath); + } catch (Throwable t) { + if (onChangeFailure != null) { + onChangeFailure.accept(t); + } else { + LOG.error(PROCESS_ERROR_MESSAGE + filePath.toString(), t); + } + } + lastProcessed = filePath.toFile().lastModified(); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java new file mode 100644 index 0000000000000..72e8b6b63b420 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.*; +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.util.concurrent.atomic.AtomicReference; + +/** + * An implementation of X509KeyManager that exposes a method, + * {@link #loadFrom(Path)} to reload its configuration. Note that it is necessary + * to implement the X509ExtendedKeyManager to properly delegate + * the additional methods, otherwise the SSL handshake will fail. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ReloadingX509KeystoreManager extends X509ExtendedKeyManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); + + static final String RELOAD_ERROR_MESSAGE = + "Could not load keystore (keep using existing one) : "; + + final private String type; + final private String storePassword; + final private String keyPassword; + private AtomicReference keyManagerRef; + + /** + * Construct a Reloading509KeystoreManager + * + * @param type type of keystore file, typically 'jks'. + * @param location local path to the keystore file. + * @param storePassword password of the keystore file. + * @param keyPassword The password of the key. + * @throws IOException + * @throws GeneralSecurityException + */ + public ReloadingX509KeystoreManager(String type, String location, + String storePassword, String keyPassword) + throws IOException, GeneralSecurityException { + this.type = type; + this.storePassword = storePassword; + this.keyPassword = keyPassword; + keyManagerRef = new AtomicReference(); + keyManagerRef.set(loadKeyManager(Paths.get(location))); + } + + @Override + public String chooseEngineClientAlias(String[] strings, Principal[] principals, + SSLEngine sslEngine) { + return keyManagerRef.get().chooseEngineClientAlias(strings, principals, sslEngine); + } + + @Override + public String chooseEngineServerAlias(String s, Principal[] principals, + SSLEngine sslEngine) { + return keyManagerRef.get().chooseEngineServerAlias(s, principals, sslEngine); + } + + @Override + public String[] getClientAliases(String s, Principal[] principals) { + return keyManagerRef.get().getClientAliases(s, principals); + } + + @Override + public String chooseClientAlias(String[] strings, Principal[] principals, + Socket socket) { + return keyManagerRef.get().chooseClientAlias(strings, principals, socket); + } + + @Override + public String[] getServerAliases(String s, Principal[] principals) { + return keyManagerRef.get().getServerAliases(s, principals); + } + + @Override + public String chooseServerAlias(String s, Principal[] principals, + Socket socket) { + return keyManagerRef.get().chooseServerAlias(s, principals, socket); + } + + @Override + public X509Certificate[] getCertificateChain(String s) { + return keyManagerRef.get().getCertificateChain(s); + } + + @Override + public PrivateKey getPrivateKey(String s) { + return keyManagerRef.get().getPrivateKey(s); + } + + public ReloadingX509KeystoreManager loadFrom(Path path) { + try { + this.keyManagerRef.set(loadKeyManager(path)); + } catch (Exception ex) { + // The Consumer.accept interface forces us to convert to unchecked + throw new RuntimeException(ex); + } + return this; + } + + private X509ExtendedKeyManager loadKeyManager(Path path) + throws IOException, GeneralSecurityException { + + X509ExtendedKeyManager keyManager = null; + KeyStore keystore = KeyStore.getInstance(type); + + try (InputStream is = Files.newInputStream(path)) { + keystore.load(is, this.storePassword.toCharArray()); + } + + LOG.debug(" Loaded KeyStore: " + path.toFile().getAbsolutePath()); + + KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( + SSLFactory.SSLCERTIFICATE); + keyMgrFactory.init(keystore, + (keyPassword != null) ? 
keyPassword.toCharArray() : null); + for (KeyManager candidate: keyMgrFactory.getKeyManagers()) { + if (candidate instanceof X509ExtendedKeyManager) { + keyManager = (X509ExtendedKeyManager)candidate; + break; + } + } + return keyManager; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index 7430477932292..68fd4c161005c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -32,6 +32,8 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.cert.CertificateException; @@ -39,31 +41,23 @@ import java.util.concurrent.atomic.AtomicReference; /** - * A {@link TrustManager} implementation that reloads its configuration when - * the truststore file on disk changes. + * A {@link TrustManager} implementation that exposes a method, {@link #loadFrom(Path)} + * to reload its configuration for example when the truststore file on disk changes. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public final class ReloadingX509TrustManager - implements X509TrustManager, Runnable { +public final class ReloadingX509TrustManager implements X509TrustManager { - @VisibleForTesting static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - @VisibleForTesting + static final String RELOAD_ERROR_MESSAGE = "Could not load truststore (keep using existing one) : "; private String type; - private File file; private String password; - private long lastLoaded; - private long reloadInterval; private AtomicReference trustManagerRef; - private volatile boolean running; - private Thread reloader; - /** * Creates a reloadable trustmanager. The trustmanager reloads itself * if the underlying trustore file has changed. @@ -71,49 +65,18 @@ public final class ReloadingX509TrustManager * @param type type of truststore file, typically 'jks'. * @param location local path to the truststore file. * @param password password of the truststore file. - * @param reloadInterval interval to check if the truststore file has * changed, in milliseconds. * @throws IOException thrown if the truststore could not be initialized due * to an IO error. * @throws GeneralSecurityException thrown if the truststore could not be * initialized due to a security error. */ - public ReloadingX509TrustManager(String type, String location, - String password, long reloadInterval) + public ReloadingX509TrustManager(String type, String location, String password) throws IOException, GeneralSecurityException { this.type = type; - file = new File(location); this.password = password; trustManagerRef = new AtomicReference(); - trustManagerRef.set(loadTrustManager()); - this.reloadInterval = reloadInterval; - } - - /** - * Starts the reloader thread. - */ - public void init() { - reloader = new Thread(this, "Truststore reloader thread"); - reloader.setDaemon(true); - running = true; - reloader.start(); - } - - /** - * Stops the reloader thread. - */ - public void destroy() { - running = false; - reloader.interrupt(); - } - - /** - * Returns the reload check interval. 
- * - * @return the reload check interval, in milliseconds. - */ - public long getReloadInterval() { - return reloadInterval; + trustManagerRef.set(loadTrustManager(Paths.get(location))); } @Override @@ -151,27 +114,24 @@ public X509Certificate[] getAcceptedIssuers() { return issuers; } - boolean needsReload() { - boolean reload = true; - if (file.exists()) { - if (file.lastModified() == lastLoaded) { - reload = false; - } - } else { - lastLoaded = 0; + public ReloadingX509TrustManager loadFrom(Path path) { + try { + this.trustManagerRef.set(loadTrustManager(path)); + } catch (Exception ex) { + // The Consumer.accept interface forces us to convert to unchecked + throw new RuntimeException(RELOAD_ERROR_MESSAGE, ex); } - return reload; + return this; } - X509TrustManager loadTrustManager() + X509TrustManager loadTrustManager(Path path) throws IOException, GeneralSecurityException { X509TrustManager trustManager = null; KeyStore ks = KeyStore.getInstance(type); - InputStream in = Files.newInputStream(file.toPath()); + InputStream in = Files.newInputStream(path); try { ks.load(in, (password == null) ? null : password.toCharArray()); - lastLoaded = file.lastModified(); - LOG.debug("Loaded truststore '" + file + "'"); + LOG.debug("Loaded truststore '" + path + "'"); } finally { in.close(); } @@ -188,23 +148,4 @@ X509TrustManager loadTrustManager() } return trustManager; } - - @Override - public void run() { - while (running) { - try { - Thread.sleep(reloadInterval); - } catch (InterruptedException e) { - //NOP - } - if (running && needsReload()) { - try { - trustManagerRef.set(loadTrustManager()); - } catch (Exception ex) { - LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex); - } - } - } - } - } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java new file mode 100644 index 0000000000000..bf0a6abdc676d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.thirdparty.com.google.common.base.Supplier; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Paths; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Timer; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +public class TestReloadingX509KeyManager { + + private static final String BASEDIR = GenericTestUtils.getTempPath( + TestReloadingX509TrustManager.class.getSimpleName()); + + private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( + FileMonitoringTimerTask.LOG); + + @BeforeClass + public static void setUp() throws Exception { + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + } + + @Test(expected = IOException.class) + public void testLoadMissingKeyStore() throws Exception { + String keystoreLocation = BASEDIR + "/testmissing.jks"; + + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + } + + @Test(expected = IOException.class) + public void testLoadCorruptKeyStore() throws Exception { + String keystoreLocation = BASEDIR + "/testcorrupt.jks"; + OutputStream os = new FileOutputStream(keystoreLocation); + os.write(1); + os.close(); + + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + } + + @Test (timeout = 3000000) + public void testReload() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate sCert = generateCertificate("CN=localhost, O=server", kp, 30, + "SHA1withRSA"); + String keystoreLocation = BASEDIR + "/testreload.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), sCert); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + // Wait so that the file modification time is different + Thread.sleep((reloadInterval+ 1000)); + + // Change the certificate with a new keypair + final KeyPair anotherKP = generateKeyPair("RSA"); + sCert = KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", anotherKP, 30, + "SHA1withRSA"); + createKeyStore(keystoreLocation, "password", "cert1", anotherKP.getPrivate(), sCert); + + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return tm.getPrivateKey("cert1").equals(kp.getPrivate()); + } + }, (int) reloadInterval, 100000); + } finally { + fileMonitoringTimer.cancel(); + } + } + + @Test (timeout = 30000) + public void testReloadMissingTrustStore() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); + String keystoreLocation = BASEDIR + 
"/testmissing.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + assertFalse(reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + + // Wait for the first reload to happen so we actually detect a change after the delete + Thread.sleep((reloadInterval+ 1000)); + + new File(keystoreLocation).delete(); + + // Wait for the reload to happen and log to get written to + Thread.sleep((reloadInterval+ 1000)); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); + + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + } finally { + reloaderLog.stopCapturing(); + fileMonitoringTimer.cancel(); + } + } + + + @Test (timeout = 30000) + public void testReloadCorruptTrustStore() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); + String keystoreLocation = BASEDIR + "/testmissing.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + // Wait so that the file modification time is different + Thread.sleep((reloadInterval + 1000)); + + assertFalse(reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + OutputStream os = new FileOutputStream(keystoreLocation); + os.write(1); + os.close(); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); + + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + } finally { + reloaderLog.stopCapturing(); + fileMonitoringTimer.cancel(); + } + } + + /**Wait for the reloader thread to load the configurations at least once + * by probing the log of the thread if the reload fails. 
+ */ + private void waitForFailedReloadAtLeastOnce(int reloadInterval) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); + } + }, reloadInterval, 10 * 1000); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 441f552649298..63589592f35dd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -30,10 +30,12 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; +import java.nio.file.Paths; import java.security.KeyPair; import java.security.cert.X509Certificate; import java.util.HashMap; import java.util.Map; +import java.util.Timer; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; @@ -50,7 +52,7 @@ public class TestReloadingX509TrustManager { private X509Certificate cert1; private X509Certificate cert2; private final LogCapturer reloaderLog = LogCapturer.captureLogs( - ReloadingX509TrustManager.LOG); + FileMonitoringTimerTask.LOG); @BeforeClass public static void setUp() throws Exception { @@ -64,12 +66,7 @@ public void testLoadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); - try { - tm.init(); - } finally { - tm.destroy(); - } + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); } @Test(expected = IOException.class) @@ -80,12 +77,7 @@ public void testLoadCorruptTrustStore() throws Exception { os.close(); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); - try { - tm.init(); - } finally { - tm.destroy(); - } + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); } @Test (timeout = 30000) @@ -96,14 +88,17 @@ public void testReload() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); // Wait so that the file modification time is different - Thread.sleep((tm.getReloadInterval() + 1000)); + Thread.sleep((reloadInterval+ 1000)); // Add another cert Map certs = new HashMap(); @@ -116,9 +111,9 @@ public void testReload() throws Exception { public Boolean get() { return tm.getAcceptedIssuers().length == 2; } - }, (int) tm.getReloadInterval(), 10000); + }, (int) reloadInterval, 100000); } finally { - tm.destroy(); + fileMonitoringTimer.cancel(); } } @@ -130,27 +125,38 @@ public void 
testReloadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); X509Certificate cert = tm.getAcceptedIssuers()[0]; assertFalse(reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + + // Wait for the first reload to happen so we actually detect a change after the delete + Thread.sleep((reloadInterval+ 1000)); + new File(truststoreLocation).delete(); - waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); + // Wait for the reload to happen and log to get written to + Thread.sleep((reloadInterval+ 1000)); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - tm.destroy(); + fileMonitoringTimer.cancel(); } } + @Test (timeout = 30000) public void testReloadCorruptTrustStore() throws Exception { KeyPair kp = generateKeyPair("RSA"); @@ -159,29 +165,32 @@ public void testReloadCorruptTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testcorrupt.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); final X509Certificate cert = tm.getAcceptedIssuers()[0]; // Wait so that the file modification time is different - Thread.sleep((tm.getReloadInterval() + 1000)); + Thread.sleep((reloadInterval + 1000)); assertFalse(reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); OutputStream os = new FileOutputStream(truststoreLocation); os.write(1); os.close(); - waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); + waitForFailedReloadAtLeastOnce((int) reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - tm.destroy(); + fileMonitoringTimer.cancel(); } } @@ -194,7 +203,7 @@ private void waitForFailedReloadAtLeastOnce(int reloadInterval) @Override public Boolean get() { return reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); } }, reloadInterval, 10 * 1000); } @@ -208,13 +217,15 @@ public void testNoPassword() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", 
"cert1", cert1); + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, null, 10); + new ReloadingX509TrustManager("jks", truststoreLocation, null); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), 10, 10); assertEquals(1, tm.getAcceptedIssuers().length); } finally { - tm.destroy(); + fileMonitoringTimer.cancel(); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index 108ce50420640..eebe0baaf2356 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; @@ -99,7 +100,7 @@ public void testSSLFactoryCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains("Truststore reloader thread"))) { + && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index 9f9564a310641..4d9c32067339b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.test.TestGenericTestUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; @@ -476,7 +477,7 @@ public void testTimelineClientCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains("Truststore reloader thread"))) { + && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index dff9a2b08a9f9..caa9a1b6c0bc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -11,7 +11,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> +--> + 4.0.0 org.apache.hadoop From 6e8a3f24569f7874684e801475c47dad72eb275e Mon Sep 17 00:00:00 2001 From: stack Date: Wed, 31 Mar 2021 10:39:55 -0700 Subject: [PATCH 174/666] Revert "Hadoop 16524 - resubmission following some unit test fixes (#2693)" Revert to fix the summary message. This reverts commit 9509bebf7fefbe1cca04e84c9978b8d1a12942e5. --- .../org/apache/hadoop/http/HttpServer2.java | 62 +---- .../ssl/FileBasedKeyStoresFactory.java | 222 +++++++----------- .../security/ssl/FileMonitoringTimerTask.java | 85 ------- .../ssl/ReloadingX509KeystoreManager.java | 157 ------------- .../ssl/ReloadingX509TrustManager.java | 95 ++++++-- .../ssl/TestReloadingX509KeyManager.java | 205 ---------------- .../ssl/TestReloadingX509TrustManager.java | 77 +++--- .../hdfs/web/TestURLConnectionFactory.java | 3 +- .../client/api/impl/TestTimelineClient.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 3 +- 10 files changed, 197 insertions(+), 715 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index dde27dbec497a..7534cba45ee13 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,17 +27,14 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; -import java.nio.file.Paths; -import java.util.List; +import java.util.Arrays; import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; import java.util.Collections; -import java.util.Optional; -import java.util.Properties; import java.util.Enumeration; -import java.util.Arrays; -import java.util.Timer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -78,8 +75,6 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; -import org.apache.hadoop.security.ssl.FileMonitoringTimerTask; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; @@ -191,7 +186,6 @@ public final class HttpServer2 implements FilterContainer { static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final SignerSecretProvider secretProvider; - private final Optional configurationChangeMonitor; private XFrameOption xFrameOption; private boolean xFrameOptionIsEnabled; public static final String HTTP_HEADER_PREFIX = "hadoop.http.header."; @@ -250,8 +244,6 @@ public static class Builder { private boolean 
sniHostCheckEnabled; - private Optional configurationChangeMonitor = Optional.empty(); - public Builder setName(String name){ this.name = name; return this; @@ -582,45 +574,12 @@ private ServerConnector createHttpsChannelConnector( } setEnabledProtocols(sslContextFactory); - - long storesReloadInterval = - conf.getLong(FileBasedKeyStoresFactory.SSL_STORES_RELOAD_INTERVAL_TPL_KEY, - FileBasedKeyStoresFactory.DEFAULT_SSL_STORES_RELOAD_INTERVAL); - - if (storesReloadInterval > 0) { - this.configurationChangeMonitor = Optional.of( - this.makeConfigurationChangeMonitor(storesReloadInterval, sslContextFactory)); - } - conn.addFirstConnectionFactory(new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString())); return conn; } - private Timer makeConfigurationChangeMonitor(long reloadInterval, - SslContextFactory.Server sslContextFactory) { - java.util.Timer timer = new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - // - // The Jetty SSLContextFactory provides a 'reload' method which will reload both - // truststore and keystore certificates. - // - timer.schedule(new FileMonitoringTimerTask( - Paths.get(keyStore), - path -> { - LOG.info("Reloading certificates from store keystore " + keyStore); - try { - sslContextFactory.reload(factory -> { }); - } catch (Exception ex) { - LOG.error("Failed to reload SSL keystore certificates", ex); - } - },null), - reloadInterval, - reloadInterval - ); - return timer; - } - private void setEnabledProtocols(SslContextFactory sslContextFactory) { String enabledProtocols = conf.get(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, SSLFactory.SSL_ENABLED_PROTOCOLS_DEFAULT); @@ -663,7 +622,6 @@ private HttpServer2(final Builder b) throws IOException { this.webAppContext = createWebAppContext(b, adminsAcl, appDir); this.xFrameOptionIsEnabled = b.xFrameEnabled; this.xFrameOption = b.xFrameOption; - this.configurationChangeMonitor = b.configurationChangeMonitor; try { this.secretProvider = @@ -1462,16 +1420,6 @@ void openListeners() throws Exception { */ public void stop() throws Exception { MultiException exception = null; - if (this.configurationChangeMonitor.isPresent()) { - try { - this.configurationChangeMonitor.get().cancel(); - } catch (Exception e) { - LOG.error( - "Error while canceling configuration monitoring timer for webapp" - + webAppContext.getDisplayName(), e); - exception = addMultiException(exception, e); - } - } for (ServerConnector c : listeners) { try { c.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index 236d881581dac..b184e4a152b8b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -29,20 +29,20 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManager; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.text.MessageFormat; -import java.util.Timer; /** * {@link KeyStoresFactory} implementation that reads the certificates from * keystore files. *

- * If either the truststore or the keystore certificates file changes, it - * would be refreshed under the corresponding wrapper implementation - - * {@link ReloadingX509KeystoreManager} or {@link ReloadingX509TrustManager}. - *

+ * if the trust certificates keystore file changes, the {@link TrustManager} + * is refreshed with the new trust certificate entries (using a + * {@link ReloadingX509TrustManager} trustmanager). */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -51,19 +51,6 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { private static final Logger LOG = LoggerFactory.getLogger(FileBasedKeyStoresFactory.class); - - /** - * The name of the timer thread monitoring file changes. - */ - public static final String SSL_MONITORING_THREAD_NAME = "SSL Certificates Store Monitor"; - - /** - * The refresh interval used to check if either of the truststore or keystore - * certificate file has changed. - */ - public static final String SSL_STORES_RELOAD_INTERVAL_TPL_KEY = - "ssl.{0}.stores.reload.interval"; - public static final String SSL_KEYSTORE_LOCATION_TPL_KEY = "ssl.{0}.keystore.location"; public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY = @@ -90,119 +77,14 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { public static final String DEFAULT_KEYSTORE_TYPE = "jks"; /** - * The default time interval in milliseconds used to check if either - * of the truststore or keystore certificates file has changed and needs reloading. + * Reload interval in milliseconds. */ - public static final int DEFAULT_SSL_STORES_RELOAD_INTERVAL = 10000; + public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000; private Configuration conf; private KeyManager[] keyManagers; private TrustManager[] trustManagers; private ReloadingX509TrustManager trustManager; - private Timer fileMonitoringTimer; - - - private void createTrustManagersFromConfiguration(SSLFactory.Mode mode, - String truststoreType, - String truststoreLocation, - long storesReloadInterval) - throws IOException, GeneralSecurityException { - String passwordProperty = resolvePropertyName(mode, - SSL_TRUSTSTORE_PASSWORD_TPL_KEY); - String truststorePassword = getPassword(conf, passwordProperty, ""); - if (truststorePassword.isEmpty()) { - // An empty trust store password is legal; the trust store password - // is only required when writing to a trust store. Otherwise it's - // an optional integrity check. - truststorePassword = null; - } - - // Check if obsolete truststore specific reload interval is present for backward compatible - long truststoreReloadInterval = - conf.getLong( - resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), - storesReloadInterval); - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation + - ", reloading at " + truststoreReloadInterval + " millis."); - } - - trustManager = new ReloadingX509TrustManager( - truststoreType, - truststoreLocation, - truststorePassword); - - if (truststoreReloadInterval > 0) { - fileMonitoringTimer.schedule( - new FileMonitoringTimerTask( - Paths.get(truststoreLocation), - path -> trustManager.loadFrom(path), - exception -> LOG.error(ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE, exception)), - truststoreReloadInterval, - truststoreReloadInterval); - } - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); - } - trustManagers = new TrustManager[]{trustManager}; - } - - /** - * Implements logic of initializing the KeyManagers with the options - * to reload keystores. - * @param mode client or server - * @param keystoreType The keystore type. 
- * @param storesReloadInterval The interval to check if the keystore certificates - * file has changed. - */ - private void createKeyManagersFromConfiguration(SSLFactory.Mode mode, - String keystoreType, long storesReloadInterval) - throws GeneralSecurityException, IOException { - String locationProperty = - resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); - String keystoreLocation = conf.get(locationProperty, ""); - if (keystoreLocation.isEmpty()) { - throw new GeneralSecurityException("The property '" + locationProperty + - "' has not been set in the ssl configuration file."); - } - String passwordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); - String keystorePassword = getPassword(conf, passwordProperty, ""); - if (keystorePassword.isEmpty()) { - throw new GeneralSecurityException("The property '" + passwordProperty + - "' has not been set in the ssl configuration file."); - } - String keyPasswordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); - // Key password defaults to the same value as store password for - // compatibility with legacy configurations that did not use a separate - // configuration property for key password. - String keystoreKeyPassword = getPassword( - conf, keyPasswordProperty, keystorePassword); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); - } - - ReloadingX509KeystoreManager keystoreManager = new ReloadingX509KeystoreManager( - keystoreType, - keystoreLocation, - keystorePassword, - keystoreKeyPassword); - - if (storesReloadInterval > 0) { - fileMonitoringTimer.schedule( - new FileMonitoringTimerTask( - Paths.get(keystoreLocation), - path -> keystoreManager.loadFrom(path), - exception -> LOG.error(ReloadingX509KeystoreManager.RELOAD_ERROR_MESSAGE, exception)), - storesReloadInterval, - storesReloadInterval); - } - - keyManagers = new KeyManager[] { keystoreManager }; - } /** * Resolves a property name to its client/server version if applicable. 
@@ -257,28 +139,56 @@ public void init(SSLFactory.Mode mode) conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, SSLFactory.SSL_REQUIRE_CLIENT_CERT_DEFAULT); - long storesReloadInterval = conf.getLong( - resolvePropertyName(mode, SSL_STORES_RELOAD_INTERVAL_TPL_KEY), - DEFAULT_SSL_STORES_RELOAD_INTERVAL); - - fileMonitoringTimer = new Timer(SSL_MONITORING_THREAD_NAME, true); - // certificate store String keystoreType = - conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), - DEFAULT_KEYSTORE_TYPE); - + conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), + DEFAULT_KEYSTORE_TYPE); + KeyStore keystore = KeyStore.getInstance(keystoreType); + String keystoreKeyPassword = null; if (requireClientCert || mode == SSLFactory.Mode.SERVER) { - createKeyManagersFromConfiguration(mode, keystoreType, storesReloadInterval); + String locationProperty = + resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); + String keystoreLocation = conf.get(locationProperty, ""); + if (keystoreLocation.isEmpty()) { + throw new GeneralSecurityException("The property '" + locationProperty + + "' has not been set in the ssl configuration file."); + } + String passwordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); + String keystorePassword = getPassword(conf, passwordProperty, ""); + if (keystorePassword.isEmpty()) { + throw new GeneralSecurityException("The property '" + passwordProperty + + "' has not been set in the ssl configuration file."); + } + String keyPasswordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); + // Key password defaults to the same value as store password for + // compatibility with legacy configurations that did not use a separate + // configuration property for key password. + keystoreKeyPassword = getPassword( + conf, keyPasswordProperty, keystorePassword); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); + } + + InputStream is = Files.newInputStream(Paths.get(keystoreLocation)); + try { + keystore.load(is, keystorePassword.toCharArray()); + } finally { + is.close(); + } + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation); + } } else { - KeyStore keystore = KeyStore.getInstance(keystoreType); keystore.load(null, null); - KeyManagerFactory keyMgrFactory = KeyManagerFactory - .getInstance(SSLFactory.SSLCERTIFICATE); - - keyMgrFactory.init(keystore, null); - keyManagers = keyMgrFactory.getKeyManagers(); } + KeyManagerFactory keyMgrFactory = KeyManagerFactory + .getInstance(SSLFactory.SSLCERTIFICATE); + + keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ? + keystoreKeyPassword.toCharArray() : null); + keyManagers = keyMgrFactory.getKeyManagers(); //trust store String truststoreType = @@ -289,7 +199,33 @@ public void init(SSLFactory.Mode mode) resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY); String truststoreLocation = conf.get(locationProperty, ""); if (!truststoreLocation.isEmpty()) { - createTrustManagersFromConfiguration(mode, truststoreType, truststoreLocation, storesReloadInterval); + String passwordProperty = resolvePropertyName(mode, + SSL_TRUSTSTORE_PASSWORD_TPL_KEY); + String truststorePassword = getPassword(conf, passwordProperty, ""); + if (truststorePassword.isEmpty()) { + // An empty trust store password is legal; the trust store password + // is only required when writing to a trust store. Otherwise it's + // an optional integrity check. 
+ truststorePassword = null; + } + long truststoreReloadInterval = + conf.getLong( + resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), + DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL); + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation); + } + + trustManager = new ReloadingX509TrustManager(truststoreType, + truststoreLocation, + truststorePassword, + truststoreReloadInterval); + trustManager.init(); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); + } + trustManagers = new TrustManager[]{trustManager}; } else { if (LOG.isDebugEnabled()) { LOG.debug("The property '" + locationProperty + "' has not been set, " + @@ -320,7 +256,7 @@ String getPassword(Configuration conf, String alias, String defaultPass) { @Override public synchronized void destroy() { if (trustManager != null) { - fileMonitoringTimer.cancel(); + trustManager.destroy(); trustManager = null; keyManagers = null; trustManagers = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java deleted file mode 100644 index 40b61978ef1f4..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.security.ssl; - -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.file.Path; -import java.util.TimerTask; -import java.util.function.Consumer; - -/** - * Implements basic logic to track when a file changes on disk and call the action - * passed to the constructor when it does. An exception handler can optionally also be specified - * in the constructor, otherwise any exception occurring during process will be logged - * using this class' logger. - */ -@InterfaceAudience.Private -public class FileMonitoringTimerTask extends TimerTask { - - static final Logger LOG = LoggerFactory.getLogger(FileMonitoringTimerTask.class); - - @VisibleForTesting - static final String PROCESS_ERROR_MESSAGE = - "Could not process file change : "; - - final private Path filePath; - final private Consumer onFileChange; - final Consumer onChangeFailure; - private long lastProcessed; - - /** - * Create file monitoring task to be scheduled using a standard Java {@link java.util.Timer} - * instance. 
- * - * @param filePath The path to the file to monitor. - * @param onFileChange The function to call when the file has changed. - * @param onChangeFailure The function to call when an exception is thrown during the - * file change processing. - */ - public FileMonitoringTimerTask(Path filePath, Consumer onFileChange, - Consumer onChangeFailure) { - Preconditions.checkNotNull(filePath, "path to monitor disk file is not set"); - Preconditions.checkNotNull(onFileChange, "action to monitor disk file is not set"); - - this.filePath = filePath; - this.lastProcessed = filePath.toFile().lastModified(); - this.onFileChange = onFileChange; - this.onChangeFailure = onChangeFailure; - } - - @Override - public void run() { - if (lastProcessed != filePath.toFile().lastModified()) { - try { - onFileChange.accept(filePath); - } catch (Throwable t) { - if (onChangeFailure != null) { - onChangeFailure.accept(t); - } else { - LOG.error(PROCESS_ERROR_MESSAGE + filePath.toString(), t); - } - } - lastProcessed = filePath.toFile().lastModified(); - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java deleted file mode 100644 index 72e8b6b63b420..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java +++ /dev/null @@ -1,157 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.security.ssl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.net.ssl.*; -import java.io.IOException; -import java.io.InputStream; -import java.net.Socket; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.Principal; -import java.security.PrivateKey; -import java.security.cert.X509Certificate; -import java.util.concurrent.atomic.AtomicReference; - -/** - * An implementation of X509KeyManager that exposes a method, - * {@link #loadFrom(Path)} to reload its configuration. Note that it is necessary - * to implement the X509ExtendedKeyManager to properly delegate - * the additional methods, otherwise the SSL handshake will fail. 
- */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class ReloadingX509KeystoreManager extends X509ExtendedKeyManager { - - private static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - - static final String RELOAD_ERROR_MESSAGE = - "Could not load keystore (keep using existing one) : "; - - final private String type; - final private String storePassword; - final private String keyPassword; - private AtomicReference keyManagerRef; - - /** - * Construct a Reloading509KeystoreManager - * - * @param type type of keystore file, typically 'jks'. - * @param location local path to the keystore file. - * @param storePassword password of the keystore file. - * @param keyPassword The password of the key. - * @throws IOException - * @throws GeneralSecurityException - */ - public ReloadingX509KeystoreManager(String type, String location, - String storePassword, String keyPassword) - throws IOException, GeneralSecurityException { - this.type = type; - this.storePassword = storePassword; - this.keyPassword = keyPassword; - keyManagerRef = new AtomicReference(); - keyManagerRef.set(loadKeyManager(Paths.get(location))); - } - - @Override - public String chooseEngineClientAlias(String[] strings, Principal[] principals, - SSLEngine sslEngine) { - return keyManagerRef.get().chooseEngineClientAlias(strings, principals, sslEngine); - } - - @Override - public String chooseEngineServerAlias(String s, Principal[] principals, - SSLEngine sslEngine) { - return keyManagerRef.get().chooseEngineServerAlias(s, principals, sslEngine); - } - - @Override - public String[] getClientAliases(String s, Principal[] principals) { - return keyManagerRef.get().getClientAliases(s, principals); - } - - @Override - public String chooseClientAlias(String[] strings, Principal[] principals, - Socket socket) { - return keyManagerRef.get().chooseClientAlias(strings, principals, socket); - } - - @Override - public String[] getServerAliases(String s, Principal[] principals) { - return keyManagerRef.get().getServerAliases(s, principals); - } - - @Override - public String chooseServerAlias(String s, Principal[] principals, - Socket socket) { - return keyManagerRef.get().chooseServerAlias(s, principals, socket); - } - - @Override - public X509Certificate[] getCertificateChain(String s) { - return keyManagerRef.get().getCertificateChain(s); - } - - @Override - public PrivateKey getPrivateKey(String s) { - return keyManagerRef.get().getPrivateKey(s); - } - - public ReloadingX509KeystoreManager loadFrom(Path path) { - try { - this.keyManagerRef.set(loadKeyManager(path)); - } catch (Exception ex) { - // The Consumer.accept interface forces us to convert to unchecked - throw new RuntimeException(ex); - } - return this; - } - - private X509ExtendedKeyManager loadKeyManager(Path path) - throws IOException, GeneralSecurityException { - - X509ExtendedKeyManager keyManager = null; - KeyStore keystore = KeyStore.getInstance(type); - - try (InputStream is = Files.newInputStream(path)) { - keystore.load(is, this.storePassword.toCharArray()); - } - - LOG.debug(" Loaded KeyStore: " + path.toFile().getAbsolutePath()); - - KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( - SSLFactory.SSLCERTIFICATE); - keyMgrFactory.init(keystore, - (keyPassword != null) ? 
keyPassword.toCharArray() : null); - for (KeyManager candidate: keyMgrFactory.getKeyManagers()) { - if (candidate instanceof X509ExtendedKeyManager) { - keyManager = (X509ExtendedKeyManager)candidate; - break; - } - } - return keyManager; - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index 68fd4c161005c..7430477932292 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -32,8 +32,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.cert.CertificateException; @@ -41,23 +39,31 @@ import java.util.concurrent.atomic.AtomicReference; /** - * A {@link TrustManager} implementation that exposes a method, {@link #loadFrom(Path)} - * to reload its configuration for example when the truststore file on disk changes. + * A {@link TrustManager} implementation that reloads its configuration when + * the truststore file on disk changes. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public final class ReloadingX509TrustManager implements X509TrustManager { +public final class ReloadingX509TrustManager + implements X509TrustManager, Runnable { + @VisibleForTesting static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - + @VisibleForTesting static final String RELOAD_ERROR_MESSAGE = "Could not load truststore (keep using existing one) : "; private String type; + private File file; private String password; + private long lastLoaded; + private long reloadInterval; private AtomicReference trustManagerRef; + private volatile boolean running; + private Thread reloader; + /** * Creates a reloadable trustmanager. The trustmanager reloads itself * if the underlying trustore file has changed. @@ -65,18 +71,49 @@ public final class ReloadingX509TrustManager implements X509TrustManager { * @param type type of truststore file, typically 'jks'. * @param location local path to the truststore file. * @param password password of the truststore file. + * @param reloadInterval interval to check if the truststore file has * changed, in milliseconds. * @throws IOException thrown if the truststore could not be initialized due * to an IO error. * @throws GeneralSecurityException thrown if the truststore could not be * initialized due to a security error. */ - public ReloadingX509TrustManager(String type, String location, String password) + public ReloadingX509TrustManager(String type, String location, + String password, long reloadInterval) throws IOException, GeneralSecurityException { this.type = type; + file = new File(location); this.password = password; trustManagerRef = new AtomicReference(); - trustManagerRef.set(loadTrustManager(Paths.get(location))); + trustManagerRef.set(loadTrustManager()); + this.reloadInterval = reloadInterval; + } + + /** + * Starts the reloader thread. + */ + public void init() { + reloader = new Thread(this, "Truststore reloader thread"); + reloader.setDaemon(true); + running = true; + reloader.start(); + } + + /** + * Stops the reloader thread. 
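+   *
+   * <p>A rough lifecycle sketch under this reverted, thread-based design (the
+   * truststore path, password and 10 second interval are placeholders):
+   * <pre>
+   *   ReloadingX509TrustManager tm = new ReloadingX509TrustManager(
+   *       "jks", "/etc/hadoop/ssl/truststore.jks", "password", 10000);
+   *   tm.init();     // starts the daemon "Truststore reloader thread"
+   *   // ... use tm as an X509TrustManager ...
+   *   tm.destroy();  // stops and interrupts the reloader thread
+   * </pre>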
+ */ + public void destroy() { + running = false; + reloader.interrupt(); + } + + /** + * Returns the reload check interval. + * + * @return the reload check interval, in milliseconds. + */ + public long getReloadInterval() { + return reloadInterval; } @Override @@ -114,24 +151,27 @@ public X509Certificate[] getAcceptedIssuers() { return issuers; } - public ReloadingX509TrustManager loadFrom(Path path) { - try { - this.trustManagerRef.set(loadTrustManager(path)); - } catch (Exception ex) { - // The Consumer.accept interface forces us to convert to unchecked - throw new RuntimeException(RELOAD_ERROR_MESSAGE, ex); + boolean needsReload() { + boolean reload = true; + if (file.exists()) { + if (file.lastModified() == lastLoaded) { + reload = false; + } + } else { + lastLoaded = 0; } - return this; + return reload; } - X509TrustManager loadTrustManager(Path path) + X509TrustManager loadTrustManager() throws IOException, GeneralSecurityException { X509TrustManager trustManager = null; KeyStore ks = KeyStore.getInstance(type); - InputStream in = Files.newInputStream(path); + InputStream in = Files.newInputStream(file.toPath()); try { ks.load(in, (password == null) ? null : password.toCharArray()); - LOG.debug("Loaded truststore '" + path + "'"); + lastLoaded = file.lastModified(); + LOG.debug("Loaded truststore '" + file + "'"); } finally { in.close(); } @@ -148,4 +188,23 @@ X509TrustManager loadTrustManager(Path path) } return trustManager; } + + @Override + public void run() { + while (running) { + try { + Thread.sleep(reloadInterval); + } catch (InterruptedException e) { + //NOP + } + if (running && needsReload()) { + try { + trustManagerRef.set(loadTrustManager()); + } catch (Exception ex) { + LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex); + } + } + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java deleted file mode 100644 index bf0a6abdc676d..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.security.ssl; - -import org.apache.hadoop.thirdparty.com.google.common.base.Supplier; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.cert.X509Certificate; -import java.util.Timer; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class TestReloadingX509KeyManager { - - private static final String BASEDIR = GenericTestUtils.getTempPath( - TestReloadingX509TrustManager.class.getSimpleName()); - - private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( - FileMonitoringTimerTask.LOG); - - @BeforeClass - public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - } - - @Test(expected = IOException.class) - public void testLoadMissingKeyStore() throws Exception { - String keystoreLocation = BASEDIR + "/testmissing.jks"; - - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - } - - @Test(expected = IOException.class) - public void testLoadCorruptKeyStore() throws Exception { - String keystoreLocation = BASEDIR + "/testcorrupt.jks"; - OutputStream os = new FileOutputStream(keystoreLocation); - os.write(1); - os.close(); - - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - } - - @Test (timeout = 3000000) - public void testReload() throws Exception { - KeyPair kp = generateKeyPair("RSA"); - X509Certificate sCert = generateCertificate("CN=localhost, O=server", kp, 30, - "SHA1withRSA"); - String keystoreLocation = BASEDIR + "/testreload.jks"; - createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), sCert); - - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - - // Wait so that the file modification time is different - Thread.sleep((reloadInterval+ 1000)); - - // Change the certificate with a new keypair - final KeyPair anotherKP = generateKeyPair("RSA"); - sCert = KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", anotherKP, 30, - "SHA1withRSA"); - createKeyStore(keystoreLocation, "password", "cert1", anotherKP.getPrivate(), sCert); - - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return tm.getPrivateKey("cert1").equals(kp.getPrivate()); - } - }, (int) reloadInterval, 100000); - } finally { - fileMonitoringTimer.cancel(); - } - } - - @Test (timeout = 30000) - public void testReloadMissingTrustStore() throws Exception { - KeyPair kp = generateKeyPair("RSA"); - X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); - String keystoreLocation = BASEDIR + 
"/testmissing.jks"; - createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); - - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - - assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); - - // Wait for the first reload to happen so we actually detect a change after the delete - Thread.sleep((reloadInterval+ 1000)); - - new File(keystoreLocation).delete(); - - // Wait for the reload to happen and log to get written to - Thread.sleep((reloadInterval+ 1000)); - - waitForFailedReloadAtLeastOnce((int) reloadInterval); - - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - } finally { - reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); - } - } - - - @Test (timeout = 30000) - public void testReloadCorruptTrustStore() throws Exception { - KeyPair kp = generateKeyPair("RSA"); - X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); - String keystoreLocation = BASEDIR + "/testmissing.jks"; - createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); - - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - - // Wait so that the file modification time is different - Thread.sleep((reloadInterval + 1000)); - - assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); - OutputStream os = new FileOutputStream(keystoreLocation); - os.write(1); - os.close(); - - waitForFailedReloadAtLeastOnce((int) reloadInterval); - - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - } finally { - reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); - } - } - - /**Wait for the reloader thread to load the configurations at least once - * by probing the log of the thread if the reload fails. 
- */ - private void waitForFailedReloadAtLeastOnce(int reloadInterval) - throws InterruptedException, TimeoutException { - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); - } - }, reloadInterval, 10 * 1000); - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 63589592f35dd..441f552649298 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -30,12 +30,10 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; -import java.nio.file.Paths; import java.security.KeyPair; import java.security.cert.X509Certificate; import java.util.HashMap; import java.util.Map; -import java.util.Timer; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; @@ -52,7 +50,7 @@ public class TestReloadingX509TrustManager { private X509Certificate cert1; private X509Certificate cert2; private final LogCapturer reloaderLog = LogCapturer.captureLogs( - FileMonitoringTimerTask.LOG); + ReloadingX509TrustManager.LOG); @BeforeClass public static void setUp() throws Exception { @@ -66,7 +64,12 @@ public void testLoadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + try { + tm.init(); + } finally { + tm.destroy(); + } } @Test(expected = IOException.class) @@ -77,7 +80,12 @@ public void testLoadCorruptTrustStore() throws Exception { os.close(); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + try { + tm.init(); + } finally { + tm.destroy(); + } } @Test (timeout = 30000) @@ -88,17 +96,14 @@ public void testReload() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); // Wait so that the file modification time is different - Thread.sleep((reloadInterval+ 1000)); + Thread.sleep((tm.getReloadInterval() + 1000)); // Add another cert Map certs = new HashMap(); @@ -111,9 +116,9 @@ public void testReload() throws Exception { public Boolean get() { return tm.getAcceptedIssuers().length == 2; } - }, (int) reloadInterval, 100000); + }, (int) tm.getReloadInterval(), 10000); } finally { - fileMonitoringTimer.cancel(); + tm.destroy(); } } @@ -125,38 +130,27 @@ public void 
testReloadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); X509Certificate cert = tm.getAcceptedIssuers()[0]; assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); - - // Wait for the first reload to happen so we actually detect a change after the delete - Thread.sleep((reloadInterval+ 1000)); - + ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); new File(truststoreLocation).delete(); - // Wait for the reload to happen and log to get written to - Thread.sleep((reloadInterval+ 1000)); - - waitForFailedReloadAtLeastOnce((int) reloadInterval); + waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); + tm.destroy(); } } - @Test (timeout = 30000) public void testReloadCorruptTrustStore() throws Exception { KeyPair kp = generateKeyPair("RSA"); @@ -165,32 +159,29 @@ public void testReloadCorruptTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testcorrupt.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); final X509Certificate cert = tm.getAcceptedIssuers()[0]; // Wait so that the file modification time is different - Thread.sleep((reloadInterval + 1000)); + Thread.sleep((tm.getReloadInterval() + 1000)); assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); OutputStream os = new FileOutputStream(truststoreLocation); os.write(1); os.close(); - waitForFailedReloadAtLeastOnce((int) reloadInterval); + waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); + tm.destroy(); } } @@ -203,7 +194,7 @@ private void waitForFailedReloadAtLeastOnce(int reloadInterval) @Override public Boolean get() { return reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); + ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE); } }, reloadInterval, 10 * 1000); } @@ -217,15 +208,13 @@ public void testNoPassword() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", 
"cert1", cert1); - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, null); + new ReloadingX509TrustManager("jks", truststoreLocation, null, 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), 10, 10); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); } finally { - fileMonitoringTimer.cancel(); + tm.destroy(); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index eebe0baaf2356..108ce50420640 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; -import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; @@ -100,7 +99,7 @@ public void testSSLFactoryCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { + && (thread.getName().contains("Truststore reloader thread"))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index 4d9c32067339b..9f9564a310641 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -41,7 +41,6 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; -import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.test.TestGenericTestUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; @@ -477,7 +476,7 @@ public void testTimelineClientCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { + && (thread.getName().contains("Truststore reloader thread"))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index caa9a1b6c0bc6..dff9a2b08a9f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -11,8 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> - +--> 4.0.0 org.apache.hadoop From 33c83358b458cfc36fb2dd3cad338d9e7f917cbb Mon Sep 17 00:00:00 2001 From: stack Date: Wed, 31 Mar 2021 10:40:20 -0700 Subject: [PATCH 175/666] HADOOP-16524. Automatic keystore reloading for HttpServer2 Reapply of issue reverted first because it caused yarn failures and then again because the commit message was incorrectly formatted. --- .../org/apache/hadoop/http/HttpServer2.java | 62 ++++- .../ssl/FileBasedKeyStoresFactory.java | 222 +++++++++++------- .../security/ssl/FileMonitoringTimerTask.java | 85 +++++++ .../ssl/ReloadingX509KeystoreManager.java | 157 +++++++++++++ .../ssl/ReloadingX509TrustManager.java | 95 ++------ .../ssl/TestReloadingX509KeyManager.java | 205 ++++++++++++++++ .../ssl/TestReloadingX509TrustManager.java | 77 +++--- .../hdfs/web/TestURLConnectionFactory.java | 3 +- .../client/api/impl/TestTimelineClient.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 3 +- 10 files changed, 715 insertions(+), 197 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 7534cba45ee13..dde27dbec497a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,14 +27,17 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; -import java.util.Arrays; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; +import java.nio.file.Paths; import java.util.List; +import java.util.ArrayList; import java.util.Map; +import java.util.HashMap; +import java.util.Collections; +import java.util.Optional; import java.util.Properties; +import java.util.Enumeration; +import java.util.Arrays; +import java.util.Timer; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -75,6 +78,8 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; +import org.apache.hadoop.security.ssl.FileMonitoringTimerTask; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; @@ -186,6 +191,7 @@ public final class HttpServer2 implements FilterContainer { static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final SignerSecretProvider secretProvider; + private final Optional configurationChangeMonitor; private XFrameOption xFrameOption; private boolean xFrameOptionIsEnabled; public static final String HTTP_HEADER_PREFIX = "hadoop.http.header."; @@ -244,6 +250,8 @@ public static class 
Builder { private boolean sniHostCheckEnabled; + private Optional configurationChangeMonitor = Optional.empty(); + public Builder setName(String name){ this.name = name; return this; @@ -574,12 +582,45 @@ private ServerConnector createHttpsChannelConnector( } setEnabledProtocols(sslContextFactory); + + long storesReloadInterval = + conf.getLong(FileBasedKeyStoresFactory.SSL_STORES_RELOAD_INTERVAL_TPL_KEY, + FileBasedKeyStoresFactory.DEFAULT_SSL_STORES_RELOAD_INTERVAL); + + if (storesReloadInterval > 0) { + this.configurationChangeMonitor = Optional.of( + this.makeConfigurationChangeMonitor(storesReloadInterval, sslContextFactory)); + } + conn.addFirstConnectionFactory(new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString())); return conn; } + private Timer makeConfigurationChangeMonitor(long reloadInterval, + SslContextFactory.Server sslContextFactory) { + java.util.Timer timer = new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + // + // The Jetty SSLContextFactory provides a 'reload' method which will reload both + // truststore and keystore certificates. + // + timer.schedule(new FileMonitoringTimerTask( + Paths.get(keyStore), + path -> { + LOG.info("Reloading certificates from store keystore " + keyStore); + try { + sslContextFactory.reload(factory -> { }); + } catch (Exception ex) { + LOG.error("Failed to reload SSL keystore certificates", ex); + } + },null), + reloadInterval, + reloadInterval + ); + return timer; + } + private void setEnabledProtocols(SslContextFactory sslContextFactory) { String enabledProtocols = conf.get(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, SSLFactory.SSL_ENABLED_PROTOCOLS_DEFAULT); @@ -622,6 +663,7 @@ private HttpServer2(final Builder b) throws IOException { this.webAppContext = createWebAppContext(b, adminsAcl, appDir); this.xFrameOptionIsEnabled = b.xFrameEnabled; this.xFrameOption = b.xFrameOption; + this.configurationChangeMonitor = b.configurationChangeMonitor; try { this.secretProvider = @@ -1420,6 +1462,16 @@ void openListeners() throws Exception { */ public void stop() throws Exception { MultiException exception = null; + if (this.configurationChangeMonitor.isPresent()) { + try { + this.configurationChangeMonitor.get().cancel(); + } catch (Exception e) { + LOG.error( + "Error while canceling configuration monitoring timer for webapp" + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + } for (ServerConnector c : listeners) { try { c.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index b184e4a152b8b..236d881581dac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -29,20 +29,20 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManager; import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.text.MessageFormat; +import java.util.Timer; /** * {@link KeyStoresFactory} implementation that reads the certificates from * keystore files. *
- * if the trust certificates keystore file changes, the {@link TrustManager} - * is refreshed with the new trust certificate entries (using a - * {@link ReloadingX509TrustManager} trustmanager). + * If either the truststore or the keystore certificates file changes, it + * would be refreshed under the corresponding wrapper implementation - + * {@link ReloadingX509KeystoreManager} or {@link ReloadingX509TrustManager}. + *
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -51,6 +51,19 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { private static final Logger LOG = LoggerFactory.getLogger(FileBasedKeyStoresFactory.class); + + /** + * The name of the timer thread monitoring file changes. + */ + public static final String SSL_MONITORING_THREAD_NAME = "SSL Certificates Store Monitor"; + + /** + * The refresh interval used to check if either of the truststore or keystore + * certificate file has changed. + */ + public static final String SSL_STORES_RELOAD_INTERVAL_TPL_KEY = + "ssl.{0}.stores.reload.interval"; + public static final String SSL_KEYSTORE_LOCATION_TPL_KEY = "ssl.{0}.keystore.location"; public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY = @@ -77,14 +90,119 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { public static final String DEFAULT_KEYSTORE_TYPE = "jks"; /** - * Reload interval in milliseconds. + * The default time interval in milliseconds used to check if either + * of the truststore or keystore certificates file has changed and needs reloading. */ - public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000; + public static final int DEFAULT_SSL_STORES_RELOAD_INTERVAL = 10000; private Configuration conf; private KeyManager[] keyManagers; private TrustManager[] trustManagers; private ReloadingX509TrustManager trustManager; + private Timer fileMonitoringTimer; + + + private void createTrustManagersFromConfiguration(SSLFactory.Mode mode, + String truststoreType, + String truststoreLocation, + long storesReloadInterval) + throws IOException, GeneralSecurityException { + String passwordProperty = resolvePropertyName(mode, + SSL_TRUSTSTORE_PASSWORD_TPL_KEY); + String truststorePassword = getPassword(conf, passwordProperty, ""); + if (truststorePassword.isEmpty()) { + // An empty trust store password is legal; the trust store password + // is only required when writing to a trust store. Otherwise it's + // an optional integrity check. + truststorePassword = null; + } + + // Check if obsolete truststore specific reload interval is present for backward compatible + long truststoreReloadInterval = + conf.getLong( + resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), + storesReloadInterval); + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation + + ", reloading at " + truststoreReloadInterval + " millis."); + } + + trustManager = new ReloadingX509TrustManager( + truststoreType, + truststoreLocation, + truststorePassword); + + if (truststoreReloadInterval > 0) { + fileMonitoringTimer.schedule( + new FileMonitoringTimerTask( + Paths.get(truststoreLocation), + path -> trustManager.loadFrom(path), + exception -> LOG.error(ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE, exception)), + truststoreReloadInterval, + truststoreReloadInterval); + } + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); + } + trustManagers = new TrustManager[]{trustManager}; + } + + /** + * Implements logic of initializing the KeyManagers with the options + * to reload keystores. + * @param mode client or server + * @param keystoreType The keystore type. + * @param storesReloadInterval The interval to check if the keystore certificates + * file has changed. 
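As a hedged sketch of the reload wiring this factory sets up, the snippet below schedules a FileMonitoringTimerTask on a daemon Timer and swaps in a freshly read truststore whenever the file's modification time changes. The store path and the sketch class name are placeholders, not part of the patch:

import java.nio.file.Paths;
import java.util.Timer;
import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory;
import org.apache.hadoop.security.ssl.FileMonitoringTimerTask;
import org.apache.hadoop.security.ssl.ReloadingX509TrustManager;

public class TrustStoreReloadSketch {
  public static void main(String[] args) throws Exception {
    String location = "/tmp/truststore.jks";            // placeholder truststore path
    ReloadingX509TrustManager tm =
        new ReloadingX509TrustManager("jks", location, null);
    Timer timer =
        new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true);
    long interval = 10000;                              // matches DEFAULT_SSL_STORES_RELOAD_INTERVAL
    timer.schedule(
        new FileMonitoringTimerTask(
            Paths.get(location),
            tm::loadFrom,                               // swap in the re-read truststore
            ex -> System.err.println("reload failed: " + ex)),
        interval, interval);
  }
}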
+ */ + private void createKeyManagersFromConfiguration(SSLFactory.Mode mode, + String keystoreType, long storesReloadInterval) + throws GeneralSecurityException, IOException { + String locationProperty = + resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); + String keystoreLocation = conf.get(locationProperty, ""); + if (keystoreLocation.isEmpty()) { + throw new GeneralSecurityException("The property '" + locationProperty + + "' has not been set in the ssl configuration file."); + } + String passwordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); + String keystorePassword = getPassword(conf, passwordProperty, ""); + if (keystorePassword.isEmpty()) { + throw new GeneralSecurityException("The property '" + passwordProperty + + "' has not been set in the ssl configuration file."); + } + String keyPasswordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); + // Key password defaults to the same value as store password for + // compatibility with legacy configurations that did not use a separate + // configuration property for key password. + String keystoreKeyPassword = getPassword( + conf, keyPasswordProperty, keystorePassword); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); + } + + ReloadingX509KeystoreManager keystoreManager = new ReloadingX509KeystoreManager( + keystoreType, + keystoreLocation, + keystorePassword, + keystoreKeyPassword); + + if (storesReloadInterval > 0) { + fileMonitoringTimer.schedule( + new FileMonitoringTimerTask( + Paths.get(keystoreLocation), + path -> keystoreManager.loadFrom(path), + exception -> LOG.error(ReloadingX509KeystoreManager.RELOAD_ERROR_MESSAGE, exception)), + storesReloadInterval, + storesReloadInterval); + } + + keyManagers = new KeyManager[] { keystoreManager }; + } /** * Resolves a property name to its client/server version if applicable. @@ -139,56 +257,28 @@ public void init(SSLFactory.Mode mode) conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, SSLFactory.SSL_REQUIRE_CLIENT_CERT_DEFAULT); + long storesReloadInterval = conf.getLong( + resolvePropertyName(mode, SSL_STORES_RELOAD_INTERVAL_TPL_KEY), + DEFAULT_SSL_STORES_RELOAD_INTERVAL); + + fileMonitoringTimer = new Timer(SSL_MONITORING_THREAD_NAME, true); + // certificate store String keystoreType = - conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), - DEFAULT_KEYSTORE_TYPE); - KeyStore keystore = KeyStore.getInstance(keystoreType); - String keystoreKeyPassword = null; - if (requireClientCert || mode == SSLFactory.Mode.SERVER) { - String locationProperty = - resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); - String keystoreLocation = conf.get(locationProperty, ""); - if (keystoreLocation.isEmpty()) { - throw new GeneralSecurityException("The property '" + locationProperty + - "' has not been set in the ssl configuration file."); - } - String passwordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); - String keystorePassword = getPassword(conf, passwordProperty, ""); - if (keystorePassword.isEmpty()) { - throw new GeneralSecurityException("The property '" + passwordProperty + - "' has not been set in the ssl configuration file."); - } - String keyPasswordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); - // Key password defaults to the same value as store password for - // compatibility with legacy configurations that did not use a separate - // configuration property for key password. 
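A hedged sketch of driving the factory purely from configuration; the store locations and passwords are placeholders (in Hadoop they normally come from ssl-server.xml), while the property names follow the templates defined above, with the new ssl.server.stores.reload.interval governing both stores:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory;
import org.apache.hadoop.security.ssl.SSLFactory;

public class KeyStoresFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration sslConf = new Configuration(false);
    // Placeholder locations/passwords for the sketch.
    sslConf.set("ssl.server.keystore.location", "/tmp/keystore.jks");
    sslConf.set("ssl.server.keystore.password", "storepass");
    sslConf.set("ssl.server.truststore.location", "/tmp/truststore.jks");
    // Single interval (ms) that now controls reloading of both stores.
    sslConf.setLong("ssl.server.stores.reload.interval", 10000);

    FileBasedKeyStoresFactory factory = new FileBasedKeyStoresFactory();
    factory.setConf(sslConf);
    factory.init(SSLFactory.Mode.SERVER);   // wires up the reloading key/trust managers
    System.out.println(factory.getKeyManagers().length);
    factory.destroy();
  }
}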
- keystoreKeyPassword = getPassword( - conf, keyPasswordProperty, keystorePassword); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); - } + conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), + DEFAULT_KEYSTORE_TYPE); - InputStream is = Files.newInputStream(Paths.get(keystoreLocation)); - try { - keystore.load(is, keystorePassword.toCharArray()); - } finally { - is.close(); - } - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation); - } + if (requireClientCert || mode == SSLFactory.Mode.SERVER) { + createKeyManagersFromConfiguration(mode, keystoreType, storesReloadInterval); } else { + KeyStore keystore = KeyStore.getInstance(keystoreType); keystore.load(null, null); + KeyManagerFactory keyMgrFactory = KeyManagerFactory + .getInstance(SSLFactory.SSLCERTIFICATE); + + keyMgrFactory.init(keystore, null); + keyManagers = keyMgrFactory.getKeyManagers(); } - KeyManagerFactory keyMgrFactory = KeyManagerFactory - .getInstance(SSLFactory.SSLCERTIFICATE); - - keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ? - keystoreKeyPassword.toCharArray() : null); - keyManagers = keyMgrFactory.getKeyManagers(); //trust store String truststoreType = @@ -199,33 +289,7 @@ public void init(SSLFactory.Mode mode) resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY); String truststoreLocation = conf.get(locationProperty, ""); if (!truststoreLocation.isEmpty()) { - String passwordProperty = resolvePropertyName(mode, - SSL_TRUSTSTORE_PASSWORD_TPL_KEY); - String truststorePassword = getPassword(conf, passwordProperty, ""); - if (truststorePassword.isEmpty()) { - // An empty trust store password is legal; the trust store password - // is only required when writing to a trust store. Otherwise it's - // an optional integrity check. - truststorePassword = null; - } - long truststoreReloadInterval = - conf.getLong( - resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), - DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL); - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation); - } - - trustManager = new ReloadingX509TrustManager(truststoreType, - truststoreLocation, - truststorePassword, - truststoreReloadInterval); - trustManager.init(); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); - } - trustManagers = new TrustManager[]{trustManager}; + createTrustManagersFromConfiguration(mode, truststoreType, truststoreLocation, storesReloadInterval); } else { if (LOG.isDebugEnabled()) { LOG.debug("The property '" + locationProperty + "' has not been set, " + @@ -256,7 +320,7 @@ String getPassword(Configuration conf, String alias, String defaultPass) { @Override public synchronized void destroy() { if (trustManager != null) { - trustManager.destroy(); + fileMonitoringTimer.cancel(); trustManager = null; keyManagers = null; trustManagers = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java new file mode 100644 index 0000000000000..40b61978ef1f4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.file.Path; +import java.util.TimerTask; +import java.util.function.Consumer; + +/** + * Implements basic logic to track when a file changes on disk and call the action + * passed to the constructor when it does. An exception handler can optionally also be specified + * in the constructor, otherwise any exception occurring during process will be logged + * using this class' logger. + */ +@InterfaceAudience.Private +public class FileMonitoringTimerTask extends TimerTask { + + static final Logger LOG = LoggerFactory.getLogger(FileMonitoringTimerTask.class); + + @VisibleForTesting + static final String PROCESS_ERROR_MESSAGE = + "Could not process file change : "; + + final private Path filePath; + final private Consumer onFileChange; + final Consumer onChangeFailure; + private long lastProcessed; + + /** + * Create file monitoring task to be scheduled using a standard Java {@link java.util.Timer} + * instance. + * + * @param filePath The path to the file to monitor. + * @param onFileChange The function to call when the file has changed. + * @param onChangeFailure The function to call when an exception is thrown during the + * file change processing. 
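A minimal, self-contained sketch of the task's contract, assuming a placeholder file path and class name: the file is polled by modification time, and the failure handler may be null, in which case errors are only logged:

import java.nio.file.Paths;
import java.util.Timer;
import org.apache.hadoop.security.ssl.FileMonitoringTimerTask;

public class FileWatchSketch {
  public static void main(String[] args) throws InterruptedException {
    Timer timer = new Timer("config-watcher", true);          // daemon timer thread
    timer.schedule(
        new FileMonitoringTimerTask(
            Paths.get("/tmp/app.properties"),                 // placeholder file to watch
            path -> System.out.println("changed: " + path),   // onFileChange
            err -> System.err.println("failed: " + err)),     // onChangeFailure (may be null)
        0, 10000);                                            // poll every 10 seconds
    Thread.sleep(60000);                                      // keep the daemon timer alive briefly
    timer.cancel();
  }
}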
+ */ + public FileMonitoringTimerTask(Path filePath, Consumer onFileChange, + Consumer onChangeFailure) { + Preconditions.checkNotNull(filePath, "path to monitor disk file is not set"); + Preconditions.checkNotNull(onFileChange, "action to monitor disk file is not set"); + + this.filePath = filePath; + this.lastProcessed = filePath.toFile().lastModified(); + this.onFileChange = onFileChange; + this.onChangeFailure = onChangeFailure; + } + + @Override + public void run() { + if (lastProcessed != filePath.toFile().lastModified()) { + try { + onFileChange.accept(filePath); + } catch (Throwable t) { + if (onChangeFailure != null) { + onChangeFailure.accept(t); + } else { + LOG.error(PROCESS_ERROR_MESSAGE + filePath.toString(), t); + } + } + lastProcessed = filePath.toFile().lastModified(); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java new file mode 100644 index 0000000000000..72e8b6b63b420 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.*; +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.util.concurrent.atomic.AtomicReference; + +/** + * An implementation of X509KeyManager that exposes a method, + * {@link #loadFrom(Path)} to reload its configuration. Note that it is necessary + * to implement the X509ExtendedKeyManager to properly delegate + * the additional methods, otherwise the SSL handshake will fail. 
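A hedged sketch of plugging a reloadable key manager of this kind into a plain JSSE SSLContext; the keystore path, passwords, and sketch class name are placeholders. It also illustrates why the wrapper must extend X509ExtendedKeyManager rather than implement X509KeyManager:

import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import org.apache.hadoop.security.ssl.ReloadingX509KeystoreManager;

public class ReloadableKeyManagerSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder path and passwords; in Hadoop these come from the ssl-server.xml settings.
    ReloadingX509KeystoreManager km = new ReloadingX509KeystoreManager(
        "jks", "/tmp/keystore.jks", "storepass", "keypass");
    SSLContext ctx = SSLContext.getInstance("TLS");
    // JSSE routes the chooseEngine* calls only to an X509ExtendedKeyManager,
    // so a plain X509KeyManager wrapper would break the TLS handshake.
    ctx.init(new KeyManager[] { km }, null, null);
  }
}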
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ReloadingX509KeystoreManager extends X509ExtendedKeyManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); + + static final String RELOAD_ERROR_MESSAGE = + "Could not load keystore (keep using existing one) : "; + + final private String type; + final private String storePassword; + final private String keyPassword; + private AtomicReference keyManagerRef; + + /** + * Construct a Reloading509KeystoreManager + * + * @param type type of keystore file, typically 'jks'. + * @param location local path to the keystore file. + * @param storePassword password of the keystore file. + * @param keyPassword The password of the key. + * @throws IOException + * @throws GeneralSecurityException + */ + public ReloadingX509KeystoreManager(String type, String location, + String storePassword, String keyPassword) + throws IOException, GeneralSecurityException { + this.type = type; + this.storePassword = storePassword; + this.keyPassword = keyPassword; + keyManagerRef = new AtomicReference(); + keyManagerRef.set(loadKeyManager(Paths.get(location))); + } + + @Override + public String chooseEngineClientAlias(String[] strings, Principal[] principals, + SSLEngine sslEngine) { + return keyManagerRef.get().chooseEngineClientAlias(strings, principals, sslEngine); + } + + @Override + public String chooseEngineServerAlias(String s, Principal[] principals, + SSLEngine sslEngine) { + return keyManagerRef.get().chooseEngineServerAlias(s, principals, sslEngine); + } + + @Override + public String[] getClientAliases(String s, Principal[] principals) { + return keyManagerRef.get().getClientAliases(s, principals); + } + + @Override + public String chooseClientAlias(String[] strings, Principal[] principals, + Socket socket) { + return keyManagerRef.get().chooseClientAlias(strings, principals, socket); + } + + @Override + public String[] getServerAliases(String s, Principal[] principals) { + return keyManagerRef.get().getServerAliases(s, principals); + } + + @Override + public String chooseServerAlias(String s, Principal[] principals, + Socket socket) { + return keyManagerRef.get().chooseServerAlias(s, principals, socket); + } + + @Override + public X509Certificate[] getCertificateChain(String s) { + return keyManagerRef.get().getCertificateChain(s); + } + + @Override + public PrivateKey getPrivateKey(String s) { + return keyManagerRef.get().getPrivateKey(s); + } + + public ReloadingX509KeystoreManager loadFrom(Path path) { + try { + this.keyManagerRef.set(loadKeyManager(path)); + } catch (Exception ex) { + // The Consumer.accept interface forces us to convert to unchecked + throw new RuntimeException(ex); + } + return this; + } + + private X509ExtendedKeyManager loadKeyManager(Path path) + throws IOException, GeneralSecurityException { + + X509ExtendedKeyManager keyManager = null; + KeyStore keystore = KeyStore.getInstance(type); + + try (InputStream is = Files.newInputStream(path)) { + keystore.load(is, this.storePassword.toCharArray()); + } + + LOG.debug(" Loaded KeyStore: " + path.toFile().getAbsolutePath()); + + KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( + SSLFactory.SSLCERTIFICATE); + keyMgrFactory.init(keystore, + (keyPassword != null) ? 
keyPassword.toCharArray() : null); + for (KeyManager candidate: keyMgrFactory.getKeyManagers()) { + if (candidate instanceof X509ExtendedKeyManager) { + keyManager = (X509ExtendedKeyManager)candidate; + break; + } + } + return keyManager; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index 7430477932292..68fd4c161005c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -32,6 +32,8 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.cert.CertificateException; @@ -39,31 +41,23 @@ import java.util.concurrent.atomic.AtomicReference; /** - * A {@link TrustManager} implementation that reloads its configuration when - * the truststore file on disk changes. + * A {@link TrustManager} implementation that exposes a method, {@link #loadFrom(Path)} + * to reload its configuration for example when the truststore file on disk changes. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public final class ReloadingX509TrustManager - implements X509TrustManager, Runnable { +public final class ReloadingX509TrustManager implements X509TrustManager { - @VisibleForTesting static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - @VisibleForTesting + static final String RELOAD_ERROR_MESSAGE = "Could not load truststore (keep using existing one) : "; private String type; - private File file; private String password; - private long lastLoaded; - private long reloadInterval; private AtomicReference trustManagerRef; - private volatile boolean running; - private Thread reloader; - /** * Creates a reloadable trustmanager. The trustmanager reloads itself * if the underlying trustore file has changed. @@ -71,49 +65,18 @@ public final class ReloadingX509TrustManager * @param type type of truststore file, typically 'jks'. * @param location local path to the truststore file. * @param password password of the truststore file. - * @param reloadInterval interval to check if the truststore file has * changed, in milliseconds. * @throws IOException thrown if the truststore could not be initialized due * to an IO error. * @throws GeneralSecurityException thrown if the truststore could not be * initialized due to a security error. */ - public ReloadingX509TrustManager(String type, String location, - String password, long reloadInterval) + public ReloadingX509TrustManager(String type, String location, String password) throws IOException, GeneralSecurityException { this.type = type; - file = new File(location); this.password = password; trustManagerRef = new AtomicReference(); - trustManagerRef.set(loadTrustManager()); - this.reloadInterval = reloadInterval; - } - - /** - * Starts the reloader thread. - */ - public void init() { - reloader = new Thread(this, "Truststore reloader thread"); - reloader.setDaemon(true); - running = true; - reloader.start(); - } - - /** - * Stops the reloader thread. - */ - public void destroy() { - running = false; - reloader.interrupt(); - } - - /** - * Returns the reload check interval. 
- * - * @return the reload check interval, in milliseconds. - */ - public long getReloadInterval() { - return reloadInterval; + trustManagerRef.set(loadTrustManager(Paths.get(location))); } @Override @@ -151,27 +114,24 @@ public X509Certificate[] getAcceptedIssuers() { return issuers; } - boolean needsReload() { - boolean reload = true; - if (file.exists()) { - if (file.lastModified() == lastLoaded) { - reload = false; - } - } else { - lastLoaded = 0; + public ReloadingX509TrustManager loadFrom(Path path) { + try { + this.trustManagerRef.set(loadTrustManager(path)); + } catch (Exception ex) { + // The Consumer.accept interface forces us to convert to unchecked + throw new RuntimeException(RELOAD_ERROR_MESSAGE, ex); } - return reload; + return this; } - X509TrustManager loadTrustManager() + X509TrustManager loadTrustManager(Path path) throws IOException, GeneralSecurityException { X509TrustManager trustManager = null; KeyStore ks = KeyStore.getInstance(type); - InputStream in = Files.newInputStream(file.toPath()); + InputStream in = Files.newInputStream(path); try { ks.load(in, (password == null) ? null : password.toCharArray()); - lastLoaded = file.lastModified(); - LOG.debug("Loaded truststore '" + file + "'"); + LOG.debug("Loaded truststore '" + path + "'"); } finally { in.close(); } @@ -188,23 +148,4 @@ X509TrustManager loadTrustManager() } return trustManager; } - - @Override - public void run() { - while (running) { - try { - Thread.sleep(reloadInterval); - } catch (InterruptedException e) { - //NOP - } - if (running && needsReload()) { - try { - trustManagerRef.set(loadTrustManager()); - } catch (Exception ex) { - LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex); - } - } - } - } - } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java new file mode 100644 index 0000000000000..bf0a6abdc676d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.thirdparty.com.google.common.base.Supplier; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Paths; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Timer; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +public class TestReloadingX509KeyManager { + + private static final String BASEDIR = GenericTestUtils.getTempPath( + TestReloadingX509TrustManager.class.getSimpleName()); + + private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( + FileMonitoringTimerTask.LOG); + + @BeforeClass + public static void setUp() throws Exception { + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + } + + @Test(expected = IOException.class) + public void testLoadMissingKeyStore() throws Exception { + String keystoreLocation = BASEDIR + "/testmissing.jks"; + + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + } + + @Test(expected = IOException.class) + public void testLoadCorruptKeyStore() throws Exception { + String keystoreLocation = BASEDIR + "/testcorrupt.jks"; + OutputStream os = new FileOutputStream(keystoreLocation); + os.write(1); + os.close(); + + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + } + + @Test (timeout = 3000000) + public void testReload() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate sCert = generateCertificate("CN=localhost, O=server", kp, 30, + "SHA1withRSA"); + String keystoreLocation = BASEDIR + "/testreload.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), sCert); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + // Wait so that the file modification time is different + Thread.sleep((reloadInterval+ 1000)); + + // Change the certificate with a new keypair + final KeyPair anotherKP = generateKeyPair("RSA"); + sCert = KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", anotherKP, 30, + "SHA1withRSA"); + createKeyStore(keystoreLocation, "password", "cert1", anotherKP.getPrivate(), sCert); + + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return tm.getPrivateKey("cert1").equals(kp.getPrivate()); + } + }, (int) reloadInterval, 100000); + } finally { + fileMonitoringTimer.cancel(); + } + } + + @Test (timeout = 30000) + public void testReloadMissingTrustStore() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); + String keystoreLocation = BASEDIR + 
"/testmissing.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + assertFalse(reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + + // Wait for the first reload to happen so we actually detect a change after the delete + Thread.sleep((reloadInterval+ 1000)); + + new File(keystoreLocation).delete(); + + // Wait for the reload to happen and log to get written to + Thread.sleep((reloadInterval+ 1000)); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); + + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + } finally { + reloaderLog.stopCapturing(); + fileMonitoringTimer.cancel(); + } + } + + + @Test (timeout = 30000) + public void testReloadCorruptTrustStore() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); + String keystoreLocation = BASEDIR + "/testmissing.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + // Wait so that the file modification time is different + Thread.sleep((reloadInterval + 1000)); + + assertFalse(reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + OutputStream os = new FileOutputStream(keystoreLocation); + os.write(1); + os.close(); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); + + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + } finally { + reloaderLog.stopCapturing(); + fileMonitoringTimer.cancel(); + } + } + + /**Wait for the reloader thread to load the configurations at least once + * by probing the log of the thread if the reload fails. 
+ */ + private void waitForFailedReloadAtLeastOnce(int reloadInterval) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); + } + }, reloadInterval, 10 * 1000); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 441f552649298..63589592f35dd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -30,10 +30,12 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; +import java.nio.file.Paths; import java.security.KeyPair; import java.security.cert.X509Certificate; import java.util.HashMap; import java.util.Map; +import java.util.Timer; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; @@ -50,7 +52,7 @@ public class TestReloadingX509TrustManager { private X509Certificate cert1; private X509Certificate cert2; private final LogCapturer reloaderLog = LogCapturer.captureLogs( - ReloadingX509TrustManager.LOG); + FileMonitoringTimerTask.LOG); @BeforeClass public static void setUp() throws Exception { @@ -64,12 +66,7 @@ public void testLoadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); - try { - tm.init(); - } finally { - tm.destroy(); - } + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); } @Test(expected = IOException.class) @@ -80,12 +77,7 @@ public void testLoadCorruptTrustStore() throws Exception { os.close(); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); - try { - tm.init(); - } finally { - tm.destroy(); - } + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); } @Test (timeout = 30000) @@ -96,14 +88,17 @@ public void testReload() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); // Wait so that the file modification time is different - Thread.sleep((tm.getReloadInterval() + 1000)); + Thread.sleep((reloadInterval+ 1000)); // Add another cert Map certs = new HashMap(); @@ -116,9 +111,9 @@ public void testReload() throws Exception { public Boolean get() { return tm.getAcceptedIssuers().length == 2; } - }, (int) tm.getReloadInterval(), 10000); + }, (int) reloadInterval, 100000); } finally { - tm.destroy(); + fileMonitoringTimer.cancel(); } } @@ -130,27 +125,38 @@ public void 
testReloadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); X509Certificate cert = tm.getAcceptedIssuers()[0]; assertFalse(reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + + // Wait for the first reload to happen so we actually detect a change after the delete + Thread.sleep((reloadInterval+ 1000)); + new File(truststoreLocation).delete(); - waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); + // Wait for the reload to happen and log to get written to + Thread.sleep((reloadInterval+ 1000)); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - tm.destroy(); + fileMonitoringTimer.cancel(); } } + @Test (timeout = 30000) public void testReloadCorruptTrustStore() throws Exception { KeyPair kp = generateKeyPair("RSA"); @@ -159,29 +165,32 @@ public void testReloadCorruptTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testcorrupt.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); final X509Certificate cert = tm.getAcceptedIssuers()[0]; // Wait so that the file modification time is different - Thread.sleep((tm.getReloadInterval() + 1000)); + Thread.sleep((reloadInterval + 1000)); assertFalse(reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); OutputStream os = new FileOutputStream(truststoreLocation); os.write(1); os.close(); - waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); + waitForFailedReloadAtLeastOnce((int) reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - tm.destroy(); + fileMonitoringTimer.cancel(); } } @@ -194,7 +203,7 @@ private void waitForFailedReloadAtLeastOnce(int reloadInterval) @Override public Boolean get() { return reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); } }, reloadInterval, 10 * 1000); } @@ -208,13 +217,15 @@ public void testNoPassword() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", 
"cert1", cert1); + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, null, 10); + new ReloadingX509TrustManager("jks", truststoreLocation, null); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), 10, 10); assertEquals(1, tm.getAcceptedIssuers().length); } finally { - tm.destroy(); + fileMonitoringTimer.cancel(); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index 108ce50420640..eebe0baaf2356 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; @@ -99,7 +100,7 @@ public void testSSLFactoryCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains("Truststore reloader thread"))) { + && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index 9f9564a310641..4d9c32067339b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.test.TestGenericTestUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; @@ -476,7 +477,7 @@ public void testTimelineClientCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains("Truststore reloader thread"))) { + && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index dff9a2b08a9f9..caa9a1b6c0bc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -11,7 +11,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> +--> + 4.0.0 org.apache.hadoop From 8327dd26078eb534e4627dbcb9bda0a056672047 Mon Sep 17 00:00:00 2001 From: stack Date: Wed, 31 Mar 2021 10:43:09 -0700 Subject: [PATCH 176/666] Revert "HADOOP-16524. Automatic keystore reloading for HttpServer2" This reverts commit a2975d215371bc693363683338bfa42a0cd5b6d9. --- .../org/apache/hadoop/http/HttpServer2.java | 62 +---- .../ssl/FileBasedKeyStoresFactory.java | 222 +++++++----------- .../security/ssl/FileMonitoringTimerTask.java | 85 ------- .../ssl/ReloadingX509KeystoreManager.java | 157 ------------- .../ssl/ReloadingX509TrustManager.java | 95 ++++++-- .../ssl/TestReloadingX509KeyManager.java | 205 ---------------- .../ssl/TestReloadingX509TrustManager.java | 77 +++--- .../hdfs/web/TestURLConnectionFactory.java | 3 +- .../client/api/impl/TestTimelineClient.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 3 +- 10 files changed, 197 insertions(+), 715 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index dde27dbec497a..7534cba45ee13 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,17 +27,14 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; -import java.nio.file.Paths; -import java.util.List; +import java.util.Arrays; import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; import java.util.Collections; -import java.util.Optional; -import java.util.Properties; import java.util.Enumeration; -import java.util.Arrays; -import java.util.Timer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -78,8 +75,6 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; -import org.apache.hadoop.security.ssl.FileMonitoringTimerTask; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; @@ -191,7 +186,6 @@ public final class HttpServer2 implements FilterContainer { static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final SignerSecretProvider secretProvider; - private final Optional configurationChangeMonitor; private XFrameOption xFrameOption; private boolean xFrameOptionIsEnabled; public static final String HTTP_HEADER_PREFIX = "hadoop.http.header."; @@ -250,8 +244,6 @@ public static class Builder { private boolean sniHostCheckEnabled; - private Optional 
configurationChangeMonitor = Optional.empty(); - public Builder setName(String name){ this.name = name; return this; @@ -582,45 +574,12 @@ private ServerConnector createHttpsChannelConnector( } setEnabledProtocols(sslContextFactory); - - long storesReloadInterval = - conf.getLong(FileBasedKeyStoresFactory.SSL_STORES_RELOAD_INTERVAL_TPL_KEY, - FileBasedKeyStoresFactory.DEFAULT_SSL_STORES_RELOAD_INTERVAL); - - if (storesReloadInterval > 0) { - this.configurationChangeMonitor = Optional.of( - this.makeConfigurationChangeMonitor(storesReloadInterval, sslContextFactory)); - } - conn.addFirstConnectionFactory(new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString())); return conn; } - private Timer makeConfigurationChangeMonitor(long reloadInterval, - SslContextFactory.Server sslContextFactory) { - java.util.Timer timer = new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - // - // The Jetty SSLContextFactory provides a 'reload' method which will reload both - // truststore and keystore certificates. - // - timer.schedule(new FileMonitoringTimerTask( - Paths.get(keyStore), - path -> { - LOG.info("Reloading certificates from store keystore " + keyStore); - try { - sslContextFactory.reload(factory -> { }); - } catch (Exception ex) { - LOG.error("Failed to reload SSL keystore certificates", ex); - } - },null), - reloadInterval, - reloadInterval - ); - return timer; - } - private void setEnabledProtocols(SslContextFactory sslContextFactory) { String enabledProtocols = conf.get(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, SSLFactory.SSL_ENABLED_PROTOCOLS_DEFAULT); @@ -663,7 +622,6 @@ private HttpServer2(final Builder b) throws IOException { this.webAppContext = createWebAppContext(b, adminsAcl, appDir); this.xFrameOptionIsEnabled = b.xFrameEnabled; this.xFrameOption = b.xFrameOption; - this.configurationChangeMonitor = b.configurationChangeMonitor; try { this.secretProvider = @@ -1462,16 +1420,6 @@ void openListeners() throws Exception { */ public void stop() throws Exception { MultiException exception = null; - if (this.configurationChangeMonitor.isPresent()) { - try { - this.configurationChangeMonitor.get().cancel(); - } catch (Exception e) { - LOG.error( - "Error while canceling configuration monitoring timer for webapp" - + webAppContext.getDisplayName(), e); - exception = addMultiException(exception, e); - } - } for (ServerConnector c : listeners) { try { c.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index 236d881581dac..b184e4a152b8b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -29,20 +29,20 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManager; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.text.MessageFormat; -import java.util.Timer; /** * {@link KeyStoresFactory} implementation that reads the certificates from * keystore files. *
- * If either the truststore or the keystore certificates file changes, it - * would be refreshed under the corresponding wrapper implementation - - * {@link ReloadingX509KeystoreManager} or {@link ReloadingX509TrustManager}. - *
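Context sketch (not part of the patch): the javadoc removed here describes the reload model this revert takes out; at the HTTP server level the same model drove Jetty certificate reloading through the makeConfigurationChangeMonitor method removed earlier in this commit. A minimal sketch of that wiring, where keyStore, sslContextFactory, reloadInterval and LOG stand in for HttpServer2 builder state and are placeholders only:

    // Sketch only: what the removed HttpServer2.makeConfigurationChangeMonitor() set up.
    java.util.Timer timer =
        new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true);
    timer.schedule(
        new FileMonitoringTimerTask(
            java.nio.file.Paths.get(keyStore),   // path to the server keystore file
            path -> {
              try {
                // Jetty reloads both keystore and truststore certificates here.
                sslContextFactory.reload(factory -> { });
              } catch (Exception ex) {
                LOG.error("Failed to reload SSL keystore certificates", ex);
              }
            },
            null),                               // null handler: the task logs failures itself
        reloadInterval, reloadInterval);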
+ * if the trust certificates keystore file changes, the {@link TrustManager} + * is refreshed with the new trust certificate entries (using a + * {@link ReloadingX509TrustManager} trustmanager). */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -51,19 +51,6 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { private static final Logger LOG = LoggerFactory.getLogger(FileBasedKeyStoresFactory.class); - - /** - * The name of the timer thread monitoring file changes. - */ - public static final String SSL_MONITORING_THREAD_NAME = "SSL Certificates Store Monitor"; - - /** - * The refresh interval used to check if either of the truststore or keystore - * certificate file has changed. - */ - public static final String SSL_STORES_RELOAD_INTERVAL_TPL_KEY = - "ssl.{0}.stores.reload.interval"; - public static final String SSL_KEYSTORE_LOCATION_TPL_KEY = "ssl.{0}.keystore.location"; public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY = @@ -90,119 +77,14 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { public static final String DEFAULT_KEYSTORE_TYPE = "jks"; /** - * The default time interval in milliseconds used to check if either - * of the truststore or keystore certificates file has changed and needs reloading. + * Reload interval in milliseconds. */ - public static final int DEFAULT_SSL_STORES_RELOAD_INTERVAL = 10000; + public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000; private Configuration conf; private KeyManager[] keyManagers; private TrustManager[] trustManagers; private ReloadingX509TrustManager trustManager; - private Timer fileMonitoringTimer; - - - private void createTrustManagersFromConfiguration(SSLFactory.Mode mode, - String truststoreType, - String truststoreLocation, - long storesReloadInterval) - throws IOException, GeneralSecurityException { - String passwordProperty = resolvePropertyName(mode, - SSL_TRUSTSTORE_PASSWORD_TPL_KEY); - String truststorePassword = getPassword(conf, passwordProperty, ""); - if (truststorePassword.isEmpty()) { - // An empty trust store password is legal; the trust store password - // is only required when writing to a trust store. Otherwise it's - // an optional integrity check. - truststorePassword = null; - } - - // Check if obsolete truststore specific reload interval is present for backward compatible - long truststoreReloadInterval = - conf.getLong( - resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), - storesReloadInterval); - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation + - ", reloading at " + truststoreReloadInterval + " millis."); - } - - trustManager = new ReloadingX509TrustManager( - truststoreType, - truststoreLocation, - truststorePassword); - - if (truststoreReloadInterval > 0) { - fileMonitoringTimer.schedule( - new FileMonitoringTimerTask( - Paths.get(truststoreLocation), - path -> trustManager.loadFrom(path), - exception -> LOG.error(ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE, exception)), - truststoreReloadInterval, - truststoreReloadInterval); - } - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); - } - trustManagers = new TrustManager[]{trustManager}; - } - - /** - * Implements logic of initializing the KeyManagers with the options - * to reload keystores. - * @param mode client or server - * @param keystoreType The keystore type. 
- * @param storesReloadInterval The interval to check if the keystore certificates - * file has changed. - */ - private void createKeyManagersFromConfiguration(SSLFactory.Mode mode, - String keystoreType, long storesReloadInterval) - throws GeneralSecurityException, IOException { - String locationProperty = - resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); - String keystoreLocation = conf.get(locationProperty, ""); - if (keystoreLocation.isEmpty()) { - throw new GeneralSecurityException("The property '" + locationProperty + - "' has not been set in the ssl configuration file."); - } - String passwordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); - String keystorePassword = getPassword(conf, passwordProperty, ""); - if (keystorePassword.isEmpty()) { - throw new GeneralSecurityException("The property '" + passwordProperty + - "' has not been set in the ssl configuration file."); - } - String keyPasswordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); - // Key password defaults to the same value as store password for - // compatibility with legacy configurations that did not use a separate - // configuration property for key password. - String keystoreKeyPassword = getPassword( - conf, keyPasswordProperty, keystorePassword); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); - } - - ReloadingX509KeystoreManager keystoreManager = new ReloadingX509KeystoreManager( - keystoreType, - keystoreLocation, - keystorePassword, - keystoreKeyPassword); - - if (storesReloadInterval > 0) { - fileMonitoringTimer.schedule( - new FileMonitoringTimerTask( - Paths.get(keystoreLocation), - path -> keystoreManager.loadFrom(path), - exception -> LOG.error(ReloadingX509KeystoreManager.RELOAD_ERROR_MESSAGE, exception)), - storesReloadInterval, - storesReloadInterval); - } - - keyManagers = new KeyManager[] { keystoreManager }; - } /** * Resolves a property name to its client/server version if applicable. 
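Configuration sketch (not part of the patch): the helpers removed above take their check period from the shared template key "ssl.{0}.stores.reload.interval", resolved per client/server mode, with the older truststore-only key still honored as a backward-compatible override. An illustration of the resolved server-side names, assuming the usual resolution of "{0}" to "server"/"client"; the 30-second value is arbitrary (the shipped default is 10000 ms):

    // Illustration only: reload-interval properties as a server-mode factory resolves them.
    org.apache.hadoop.conf.Configuration sslConf =
        new org.apache.hadoop.conf.Configuration(false);
    // Shared interval for keystore and truststore checks ("ssl.{0}.stores.reload.interval").
    sslConf.setLong("ssl.server.stores.reload.interval", 30000L);
    // Older truststore-specific key, still honored as a backward-compatible override.
    sslConf.setLong("ssl.server.truststore.reload.interval", 30000L);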
@@ -257,28 +139,56 @@ public void init(SSLFactory.Mode mode) conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, SSLFactory.SSL_REQUIRE_CLIENT_CERT_DEFAULT); - long storesReloadInterval = conf.getLong( - resolvePropertyName(mode, SSL_STORES_RELOAD_INTERVAL_TPL_KEY), - DEFAULT_SSL_STORES_RELOAD_INTERVAL); - - fileMonitoringTimer = new Timer(SSL_MONITORING_THREAD_NAME, true); - // certificate store String keystoreType = - conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), - DEFAULT_KEYSTORE_TYPE); - + conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), + DEFAULT_KEYSTORE_TYPE); + KeyStore keystore = KeyStore.getInstance(keystoreType); + String keystoreKeyPassword = null; if (requireClientCert || mode == SSLFactory.Mode.SERVER) { - createKeyManagersFromConfiguration(mode, keystoreType, storesReloadInterval); + String locationProperty = + resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); + String keystoreLocation = conf.get(locationProperty, ""); + if (keystoreLocation.isEmpty()) { + throw new GeneralSecurityException("The property '" + locationProperty + + "' has not been set in the ssl configuration file."); + } + String passwordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); + String keystorePassword = getPassword(conf, passwordProperty, ""); + if (keystorePassword.isEmpty()) { + throw new GeneralSecurityException("The property '" + passwordProperty + + "' has not been set in the ssl configuration file."); + } + String keyPasswordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); + // Key password defaults to the same value as store password for + // compatibility with legacy configurations that did not use a separate + // configuration property for key password. + keystoreKeyPassword = getPassword( + conf, keyPasswordProperty, keystorePassword); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); + } + + InputStream is = Files.newInputStream(Paths.get(keystoreLocation)); + try { + keystore.load(is, keystorePassword.toCharArray()); + } finally { + is.close(); + } + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation); + } } else { - KeyStore keystore = KeyStore.getInstance(keystoreType); keystore.load(null, null); - KeyManagerFactory keyMgrFactory = KeyManagerFactory - .getInstance(SSLFactory.SSLCERTIFICATE); - - keyMgrFactory.init(keystore, null); - keyManagers = keyMgrFactory.getKeyManagers(); } + KeyManagerFactory keyMgrFactory = KeyManagerFactory + .getInstance(SSLFactory.SSLCERTIFICATE); + + keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ? + keystoreKeyPassword.toCharArray() : null); + keyManagers = keyMgrFactory.getKeyManagers(); //trust store String truststoreType = @@ -289,7 +199,33 @@ public void init(SSLFactory.Mode mode) resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY); String truststoreLocation = conf.get(locationProperty, ""); if (!truststoreLocation.isEmpty()) { - createTrustManagersFromConfiguration(mode, truststoreType, truststoreLocation, storesReloadInterval); + String passwordProperty = resolvePropertyName(mode, + SSL_TRUSTSTORE_PASSWORD_TPL_KEY); + String truststorePassword = getPassword(conf, passwordProperty, ""); + if (truststorePassword.isEmpty()) { + // An empty trust store password is legal; the trust store password + // is only required when writing to a trust store. Otherwise it's + // an optional integrity check. 
+ truststorePassword = null; + } + long truststoreReloadInterval = + conf.getLong( + resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), + DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL); + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation); + } + + trustManager = new ReloadingX509TrustManager(truststoreType, + truststoreLocation, + truststorePassword, + truststoreReloadInterval); + trustManager.init(); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); + } + trustManagers = new TrustManager[]{trustManager}; } else { if (LOG.isDebugEnabled()) { LOG.debug("The property '" + locationProperty + "' has not been set, " + @@ -320,7 +256,7 @@ String getPassword(Configuration conf, String alias, String defaultPass) { @Override public synchronized void destroy() { if (trustManager != null) { - fileMonitoringTimer.cancel(); + trustManager.destroy(); trustManager = null; keyManagers = null; trustManagers = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java deleted file mode 100644 index 40b61978ef1f4..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.security.ssl; - -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.file.Path; -import java.util.TimerTask; -import java.util.function.Consumer; - -/** - * Implements basic logic to track when a file changes on disk and call the action - * passed to the constructor when it does. An exception handler can optionally also be specified - * in the constructor, otherwise any exception occurring during process will be logged - * using this class' logger. - */ -@InterfaceAudience.Private -public class FileMonitoringTimerTask extends TimerTask { - - static final Logger LOG = LoggerFactory.getLogger(FileMonitoringTimerTask.class); - - @VisibleForTesting - static final String PROCESS_ERROR_MESSAGE = - "Could not process file change : "; - - final private Path filePath; - final private Consumer onFileChange; - final Consumer onChangeFailure; - private long lastProcessed; - - /** - * Create file monitoring task to be scheduled using a standard Java {@link java.util.Timer} - * instance. 
- * - * @param filePath The path to the file to monitor. - * @param onFileChange The function to call when the file has changed. - * @param onChangeFailure The function to call when an exception is thrown during the - * file change processing. - */ - public FileMonitoringTimerTask(Path filePath, Consumer onFileChange, - Consumer onChangeFailure) { - Preconditions.checkNotNull(filePath, "path to monitor disk file is not set"); - Preconditions.checkNotNull(onFileChange, "action to monitor disk file is not set"); - - this.filePath = filePath; - this.lastProcessed = filePath.toFile().lastModified(); - this.onFileChange = onFileChange; - this.onChangeFailure = onChangeFailure; - } - - @Override - public void run() { - if (lastProcessed != filePath.toFile().lastModified()) { - try { - onFileChange.accept(filePath); - } catch (Throwable t) { - if (onChangeFailure != null) { - onChangeFailure.accept(t); - } else { - LOG.error(PROCESS_ERROR_MESSAGE + filePath.toString(), t); - } - } - lastProcessed = filePath.toFile().lastModified(); - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java deleted file mode 100644 index 72e8b6b63b420..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java +++ /dev/null @@ -1,157 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.security.ssl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.net.ssl.*; -import java.io.IOException; -import java.io.InputStream; -import java.net.Socket; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.Principal; -import java.security.PrivateKey; -import java.security.cert.X509Certificate; -import java.util.concurrent.atomic.AtomicReference; - -/** - * An implementation of X509KeyManager that exposes a method, - * {@link #loadFrom(Path)} to reload its configuration. Note that it is necessary - * to implement the X509ExtendedKeyManager to properly delegate - * the additional methods, otherwise the SSL handshake will fail. 
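Usage sketch (not part of the patch): the timer task above and the keystore manager whose javadoc ends here are both deleted by this revert; the factory and tests elsewhere in this series wire them together roughly as follows. Paths, passwords and the 10-second period are placeholders; imports (javax.net.ssl.KeyManager, java.util.Timer, java.nio.file.Paths) and checked-exception handling are omitted:

    // Sketch only: the reverted reload wiring, mirroring FileBasedKeyStoresFactory.
    java.util.Timer monitor =
        new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true);
    ReloadingX509KeystoreManager keystoreManager =
        new ReloadingX509KeystoreManager("jks", "/path/to/keystore.jks",
            "storePassword", "keyPassword");
    monitor.schedule(
        new FileMonitoringTimerTask(
            java.nio.file.Paths.get("/path/to/keystore.jks"),
            keystoreManager::loadFrom,   // swap the delegate X509ExtendedKeyManager on change
            null),                       // null handler: failures go to the task's own log
        10000, 10000);                   // check every 10 seconds
    KeyManager[] keyManagers = new KeyManager[] { keystoreManager };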
- */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class ReloadingX509KeystoreManager extends X509ExtendedKeyManager { - - private static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - - static final String RELOAD_ERROR_MESSAGE = - "Could not load keystore (keep using existing one) : "; - - final private String type; - final private String storePassword; - final private String keyPassword; - private AtomicReference keyManagerRef; - - /** - * Construct a Reloading509KeystoreManager - * - * @param type type of keystore file, typically 'jks'. - * @param location local path to the keystore file. - * @param storePassword password of the keystore file. - * @param keyPassword The password of the key. - * @throws IOException - * @throws GeneralSecurityException - */ - public ReloadingX509KeystoreManager(String type, String location, - String storePassword, String keyPassword) - throws IOException, GeneralSecurityException { - this.type = type; - this.storePassword = storePassword; - this.keyPassword = keyPassword; - keyManagerRef = new AtomicReference(); - keyManagerRef.set(loadKeyManager(Paths.get(location))); - } - - @Override - public String chooseEngineClientAlias(String[] strings, Principal[] principals, - SSLEngine sslEngine) { - return keyManagerRef.get().chooseEngineClientAlias(strings, principals, sslEngine); - } - - @Override - public String chooseEngineServerAlias(String s, Principal[] principals, - SSLEngine sslEngine) { - return keyManagerRef.get().chooseEngineServerAlias(s, principals, sslEngine); - } - - @Override - public String[] getClientAliases(String s, Principal[] principals) { - return keyManagerRef.get().getClientAliases(s, principals); - } - - @Override - public String chooseClientAlias(String[] strings, Principal[] principals, - Socket socket) { - return keyManagerRef.get().chooseClientAlias(strings, principals, socket); - } - - @Override - public String[] getServerAliases(String s, Principal[] principals) { - return keyManagerRef.get().getServerAliases(s, principals); - } - - @Override - public String chooseServerAlias(String s, Principal[] principals, - Socket socket) { - return keyManagerRef.get().chooseServerAlias(s, principals, socket); - } - - @Override - public X509Certificate[] getCertificateChain(String s) { - return keyManagerRef.get().getCertificateChain(s); - } - - @Override - public PrivateKey getPrivateKey(String s) { - return keyManagerRef.get().getPrivateKey(s); - } - - public ReloadingX509KeystoreManager loadFrom(Path path) { - try { - this.keyManagerRef.set(loadKeyManager(path)); - } catch (Exception ex) { - // The Consumer.accept interface forces us to convert to unchecked - throw new RuntimeException(ex); - } - return this; - } - - private X509ExtendedKeyManager loadKeyManager(Path path) - throws IOException, GeneralSecurityException { - - X509ExtendedKeyManager keyManager = null; - KeyStore keystore = KeyStore.getInstance(type); - - try (InputStream is = Files.newInputStream(path)) { - keystore.load(is, this.storePassword.toCharArray()); - } - - LOG.debug(" Loaded KeyStore: " + path.toFile().getAbsolutePath()); - - KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( - SSLFactory.SSLCERTIFICATE); - keyMgrFactory.init(keystore, - (keyPassword != null) ? 
keyPassword.toCharArray() : null); - for (KeyManager candidate: keyMgrFactory.getKeyManagers()) { - if (candidate instanceof X509ExtendedKeyManager) { - keyManager = (X509ExtendedKeyManager)candidate; - break; - } - } - return keyManager; - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index 68fd4c161005c..7430477932292 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -32,8 +32,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.cert.CertificateException; @@ -41,23 +39,31 @@ import java.util.concurrent.atomic.AtomicReference; /** - * A {@link TrustManager} implementation that exposes a method, {@link #loadFrom(Path)} - * to reload its configuration for example when the truststore file on disk changes. + * A {@link TrustManager} implementation that reloads its configuration when + * the truststore file on disk changes. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public final class ReloadingX509TrustManager implements X509TrustManager { +public final class ReloadingX509TrustManager + implements X509TrustManager, Runnable { + @VisibleForTesting static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - + @VisibleForTesting static final String RELOAD_ERROR_MESSAGE = "Could not load truststore (keep using existing one) : "; private String type; + private File file; private String password; + private long lastLoaded; + private long reloadInterval; private AtomicReference trustManagerRef; + private volatile boolean running; + private Thread reloader; + /** * Creates a reloadable trustmanager. The trustmanager reloads itself * if the underlying trustore file has changed. @@ -65,18 +71,49 @@ public final class ReloadingX509TrustManager implements X509TrustManager { * @param type type of truststore file, typically 'jks'. * @param location local path to the truststore file. * @param password password of the truststore file. + * @param reloadInterval interval to check if the truststore file has * changed, in milliseconds. * @throws IOException thrown if the truststore could not be initialized due * to an IO error. * @throws GeneralSecurityException thrown if the truststore could not be * initialized due to a security error. */ - public ReloadingX509TrustManager(String type, String location, String password) + public ReloadingX509TrustManager(String type, String location, + String password, long reloadInterval) throws IOException, GeneralSecurityException { this.type = type; + file = new File(location); this.password = password; trustManagerRef = new AtomicReference(); - trustManagerRef.set(loadTrustManager(Paths.get(location))); + trustManagerRef.set(loadTrustManager()); + this.reloadInterval = reloadInterval; + } + + /** + * Starts the reloader thread. + */ + public void init() { + reloader = new Thread(this, "Truststore reloader thread"); + reloader.setDaemon(true); + running = true; + reloader.start(); + } + + /** + * Stops the reloader thread. 
+ */ + public void destroy() { + running = false; + reloader.interrupt(); + } + + /** + * Returns the reload check interval. + * + * @return the reload check interval, in milliseconds. + */ + public long getReloadInterval() { + return reloadInterval; } @Override @@ -114,24 +151,27 @@ public X509Certificate[] getAcceptedIssuers() { return issuers; } - public ReloadingX509TrustManager loadFrom(Path path) { - try { - this.trustManagerRef.set(loadTrustManager(path)); - } catch (Exception ex) { - // The Consumer.accept interface forces us to convert to unchecked - throw new RuntimeException(RELOAD_ERROR_MESSAGE, ex); + boolean needsReload() { + boolean reload = true; + if (file.exists()) { + if (file.lastModified() == lastLoaded) { + reload = false; + } + } else { + lastLoaded = 0; } - return this; + return reload; } - X509TrustManager loadTrustManager(Path path) + X509TrustManager loadTrustManager() throws IOException, GeneralSecurityException { X509TrustManager trustManager = null; KeyStore ks = KeyStore.getInstance(type); - InputStream in = Files.newInputStream(path); + InputStream in = Files.newInputStream(file.toPath()); try { ks.load(in, (password == null) ? null : password.toCharArray()); - LOG.debug("Loaded truststore '" + path + "'"); + lastLoaded = file.lastModified(); + LOG.debug("Loaded truststore '" + file + "'"); } finally { in.close(); } @@ -148,4 +188,23 @@ X509TrustManager loadTrustManager(Path path) } return trustManager; } + + @Override + public void run() { + while (running) { + try { + Thread.sleep(reloadInterval); + } catch (InterruptedException e) { + //NOP + } + if (running && needsReload()) { + try { + trustManagerRef.set(loadTrustManager()); + } catch (Exception ex) { + LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex); + } + } + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java deleted file mode 100644 index bf0a6abdc676d..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
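Usage sketch (not part of the patch): the hunks above restore the pre-HADOOP-16524 trust manager, which owns its own daemon reloader thread, so callers return to the init()/destroy() lifecycle instead of scheduling an external timer. Placeholder path and interval; the constructor arguments and thread name are the ones restored above, and checked-exception handling is omitted:

    // Sketch only: lifecycle of the thread-based trust manager restored by this revert.
    ReloadingX509TrustManager tm =
        new ReloadingX509TrustManager("jks", "/path/to/truststore.jks", "password", 10000);
    tm.init();        // starts the daemon "Truststore reloader thread"
    try {
      // hand tm to the SSLContext / KeyStoresFactory as usual
    } finally {
      tm.destroy();   // stops and interrupts the reloader thread
    }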
- */ -package org.apache.hadoop.security.ssl; - -import org.apache.hadoop.thirdparty.com.google.common.base.Supplier; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.cert.X509Certificate; -import java.util.Timer; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class TestReloadingX509KeyManager { - - private static final String BASEDIR = GenericTestUtils.getTempPath( - TestReloadingX509TrustManager.class.getSimpleName()); - - private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( - FileMonitoringTimerTask.LOG); - - @BeforeClass - public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - } - - @Test(expected = IOException.class) - public void testLoadMissingKeyStore() throws Exception { - String keystoreLocation = BASEDIR + "/testmissing.jks"; - - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - } - - @Test(expected = IOException.class) - public void testLoadCorruptKeyStore() throws Exception { - String keystoreLocation = BASEDIR + "/testcorrupt.jks"; - OutputStream os = new FileOutputStream(keystoreLocation); - os.write(1); - os.close(); - - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - } - - @Test (timeout = 3000000) - public void testReload() throws Exception { - KeyPair kp = generateKeyPair("RSA"); - X509Certificate sCert = generateCertificate("CN=localhost, O=server", kp, 30, - "SHA1withRSA"); - String keystoreLocation = BASEDIR + "/testreload.jks"; - createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), sCert); - - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - - // Wait so that the file modification time is different - Thread.sleep((reloadInterval+ 1000)); - - // Change the certificate with a new keypair - final KeyPair anotherKP = generateKeyPair("RSA"); - sCert = KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", anotherKP, 30, - "SHA1withRSA"); - createKeyStore(keystoreLocation, "password", "cert1", anotherKP.getPrivate(), sCert); - - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return tm.getPrivateKey("cert1").equals(kp.getPrivate()); - } - }, (int) reloadInterval, 100000); - } finally { - fileMonitoringTimer.cancel(); - } - } - - @Test (timeout = 30000) - public void testReloadMissingTrustStore() throws Exception { - KeyPair kp = generateKeyPair("RSA"); - X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); - String keystoreLocation = BASEDIR + 
"/testmissing.jks"; - createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); - - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - - assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); - - // Wait for the first reload to happen so we actually detect a change after the delete - Thread.sleep((reloadInterval+ 1000)); - - new File(keystoreLocation).delete(); - - // Wait for the reload to happen and log to get written to - Thread.sleep((reloadInterval+ 1000)); - - waitForFailedReloadAtLeastOnce((int) reloadInterval); - - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - } finally { - reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); - } - } - - - @Test (timeout = 30000) - public void testReloadCorruptTrustStore() throws Exception { - KeyPair kp = generateKeyPair("RSA"); - X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); - String keystoreLocation = BASEDIR + "/testmissing.jks"; - createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); - - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); - ReloadingX509KeystoreManager tm = - new ReloadingX509KeystoreManager("jks", keystoreLocation, - "password", - "password"); - try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - - // Wait so that the file modification time is different - Thread.sleep((reloadInterval + 1000)); - - assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); - OutputStream os = new FileOutputStream(keystoreLocation); - os.write(1); - os.close(); - - waitForFailedReloadAtLeastOnce((int) reloadInterval); - - assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); - } finally { - reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); - } - } - - /**Wait for the reloader thread to load the configurations at least once - * by probing the log of the thread if the reload fails. 
- */ - private void waitForFailedReloadAtLeastOnce(int reloadInterval) - throws InterruptedException, TimeoutException { - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); - } - }, reloadInterval, 10 * 1000); - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 63589592f35dd..441f552649298 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -30,12 +30,10 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; -import java.nio.file.Paths; import java.security.KeyPair; import java.security.cert.X509Certificate; import java.util.HashMap; import java.util.Map; -import java.util.Timer; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; @@ -52,7 +50,7 @@ public class TestReloadingX509TrustManager { private X509Certificate cert1; private X509Certificate cert2; private final LogCapturer reloaderLog = LogCapturer.captureLogs( - FileMonitoringTimerTask.LOG); + ReloadingX509TrustManager.LOG); @BeforeClass public static void setUp() throws Exception { @@ -66,7 +64,12 @@ public void testLoadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + try { + tm.init(); + } finally { + tm.destroy(); + } } @Test(expected = IOException.class) @@ -77,7 +80,12 @@ public void testLoadCorruptTrustStore() throws Exception { os.close(); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + try { + tm.init(); + } finally { + tm.destroy(); + } } @Test (timeout = 30000) @@ -88,17 +96,14 @@ public void testReload() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); // Wait so that the file modification time is different - Thread.sleep((reloadInterval+ 1000)); + Thread.sleep((tm.getReloadInterval() + 1000)); // Add another cert Map certs = new HashMap(); @@ -111,9 +116,9 @@ public void testReload() throws Exception { public Boolean get() { return tm.getAcceptedIssuers().length == 2; } - }, (int) reloadInterval, 100000); + }, (int) tm.getReloadInterval(), 10000); } finally { - fileMonitoringTimer.cancel(); + tm.destroy(); } } @@ -125,38 +130,27 @@ public void 
testReloadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); X509Certificate cert = tm.getAcceptedIssuers()[0]; assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); - - // Wait for the first reload to happen so we actually detect a change after the delete - Thread.sleep((reloadInterval+ 1000)); - + ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); new File(truststoreLocation).delete(); - // Wait for the reload to happen and log to get written to - Thread.sleep((reloadInterval+ 1000)); - - waitForFailedReloadAtLeastOnce((int) reloadInterval); + waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); + tm.destroy(); } } - @Test (timeout = 30000) public void testReloadCorruptTrustStore() throws Exception { KeyPair kp = generateKeyPair("RSA"); @@ -165,32 +159,29 @@ public void testReloadCorruptTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testcorrupt.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); - long reloadInterval = 10; - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password"); + new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); final X509Certificate cert = tm.getAcceptedIssuers()[0]; // Wait so that the file modification time is different - Thread.sleep((reloadInterval + 1000)); + Thread.sleep((tm.getReloadInterval() + 1000)); assertFalse(reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); OutputStream os = new FileOutputStream(truststoreLocation); os.write(1); os.close(); - waitForFailedReloadAtLeastOnce((int) reloadInterval); + waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - fileMonitoringTimer.cancel(); + tm.destroy(); } } @@ -203,7 +194,7 @@ private void waitForFailedReloadAtLeastOnce(int reloadInterval) @Override public Boolean get() { return reloaderLog.getOutput().contains( - FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); + ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE); } }, reloadInterval, 10 * 1000); } @@ -217,15 +208,13 @@ public void testNoPassword() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", 
"cert1", cert1); - Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, null); + new ReloadingX509TrustManager("jks", truststoreLocation, null, 10); try { - fileMonitoringTimer.schedule(new FileMonitoringTimerTask( - Paths.get(truststoreLocation), tm::loadFrom,null), 10, 10); + tm.init(); assertEquals(1, tm.getAcceptedIssuers().length); } finally { - fileMonitoringTimer.cancel(); + tm.destroy(); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index eebe0baaf2356..108ce50420640 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; -import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; @@ -100,7 +99,7 @@ public void testSSLFactoryCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { + && (thread.getName().contains("Truststore reloader thread"))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index 4d9c32067339b..9f9564a310641 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -41,7 +41,6 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; -import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.test.TestGenericTestUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; @@ -477,7 +476,7 @@ public void testTimelineClientCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { + && (thread.getName().contains("Truststore reloader thread"))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index caa9a1b6c0bc6..dff9a2b08a9f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -11,8 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> - +--> 4.0.0 org.apache.hadoop From c3d01ebd926b42bae4c1847523ad8292985bb06f Mon Sep 17 00:00:00 2001 From: Borislav Iordanov Date: Wed, 31 Mar 2021 10:45:15 -0700 Subject: [PATCH 177/666] HADOOP-16524. Automatic keystore reloading for HttpServer2 Reapply of issue reverted first because it caused yarn failures and then again because the commit message was incorrectly formatted (and yet again because of commit message format). Signed-off-by: stack --- .../org/apache/hadoop/http/HttpServer2.java | 62 ++++- .../ssl/FileBasedKeyStoresFactory.java | 222 +++++++++++------- .../security/ssl/FileMonitoringTimerTask.java | 85 +++++++ .../ssl/ReloadingX509KeystoreManager.java | 157 +++++++++++++ .../ssl/ReloadingX509TrustManager.java | 95 ++------ .../ssl/TestReloadingX509KeyManager.java | 205 ++++++++++++++++ .../ssl/TestReloadingX509TrustManager.java | 77 +++--- .../hdfs/web/TestURLConnectionFactory.java | 3 +- .../client/api/impl/TestTimelineClient.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 3 +- 10 files changed, 715 insertions(+), 197 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 7534cba45ee13..dde27dbec497a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,14 +27,17 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; -import java.util.Arrays; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; +import java.nio.file.Paths; import java.util.List; +import java.util.ArrayList; import java.util.Map; +import java.util.HashMap; +import java.util.Collections; +import java.util.Optional; import java.util.Properties; +import java.util.Enumeration; +import java.util.Arrays; +import java.util.Timer; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -75,6 +78,8 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; +import org.apache.hadoop.security.ssl.FileMonitoringTimerTask; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; @@ -186,6 +191,7 @@ public final class HttpServer2 implements FilterContainer { static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final SignerSecretProvider secretProvider; + private final Optional configurationChangeMonitor; private XFrameOption xFrameOption; private boolean xFrameOptionIsEnabled; public static final String 
HTTP_HEADER_PREFIX = "hadoop.http.header."; @@ -244,6 +250,8 @@ public static class Builder { private boolean sniHostCheckEnabled; + private Optional configurationChangeMonitor = Optional.empty(); + public Builder setName(String name){ this.name = name; return this; @@ -574,12 +582,45 @@ private ServerConnector createHttpsChannelConnector( } setEnabledProtocols(sslContextFactory); + + long storesReloadInterval = + conf.getLong(FileBasedKeyStoresFactory.SSL_STORES_RELOAD_INTERVAL_TPL_KEY, + FileBasedKeyStoresFactory.DEFAULT_SSL_STORES_RELOAD_INTERVAL); + + if (storesReloadInterval > 0) { + this.configurationChangeMonitor = Optional.of( + this.makeConfigurationChangeMonitor(storesReloadInterval, sslContextFactory)); + } + conn.addFirstConnectionFactory(new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString())); return conn; } + private Timer makeConfigurationChangeMonitor(long reloadInterval, + SslContextFactory.Server sslContextFactory) { + java.util.Timer timer = new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + // + // The Jetty SSLContextFactory provides a 'reload' method which will reload both + // truststore and keystore certificates. + // + timer.schedule(new FileMonitoringTimerTask( + Paths.get(keyStore), + path -> { + LOG.info("Reloading certificates from store keystore " + keyStore); + try { + sslContextFactory.reload(factory -> { }); + } catch (Exception ex) { + LOG.error("Failed to reload SSL keystore certificates", ex); + } + },null), + reloadInterval, + reloadInterval + ); + return timer; + } + private void setEnabledProtocols(SslContextFactory sslContextFactory) { String enabledProtocols = conf.get(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, SSLFactory.SSL_ENABLED_PROTOCOLS_DEFAULT); @@ -622,6 +663,7 @@ private HttpServer2(final Builder b) throws IOException { this.webAppContext = createWebAppContext(b, adminsAcl, appDir); this.xFrameOptionIsEnabled = b.xFrameEnabled; this.xFrameOption = b.xFrameOption; + this.configurationChangeMonitor = b.configurationChangeMonitor; try { this.secretProvider = @@ -1420,6 +1462,16 @@ void openListeners() throws Exception { */ public void stop() throws Exception { MultiException exception = null; + if (this.configurationChangeMonitor.isPresent()) { + try { + this.configurationChangeMonitor.get().cancel(); + } catch (Exception e) { + LOG.error( + "Error while canceling configuration monitoring timer for webapp" + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + } for (ServerConnector c : listeners) { try { c.close(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index b184e4a152b8b..236d881581dac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -29,20 +29,20 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManager; import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.text.MessageFormat; +import java.util.Timer; /** * {@link KeyStoresFactory} implementation that reads the certificates from * keystore files. *

- * if the trust certificates keystore file changes, the {@link TrustManager} - * is refreshed with the new trust certificate entries (using a - * {@link ReloadingX509TrustManager} trustmanager). + * If either the truststore or the keystore certificates file changes, it + * would be refreshed under the corresponding wrapper implementation - + * {@link ReloadingX509KeystoreManager} or {@link ReloadingX509TrustManager}. + *

*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -51,6 +51,19 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { private static final Logger LOG = LoggerFactory.getLogger(FileBasedKeyStoresFactory.class); + + /** + * The name of the timer thread monitoring file changes. + */ + public static final String SSL_MONITORING_THREAD_NAME = "SSL Certificates Store Monitor"; + + /** + * The refresh interval used to check if either of the truststore or keystore + * certificate file has changed. + */ + public static final String SSL_STORES_RELOAD_INTERVAL_TPL_KEY = + "ssl.{0}.stores.reload.interval"; + public static final String SSL_KEYSTORE_LOCATION_TPL_KEY = "ssl.{0}.keystore.location"; public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY = @@ -77,14 +90,119 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { public static final String DEFAULT_KEYSTORE_TYPE = "jks"; /** - * Reload interval in milliseconds. + * The default time interval in milliseconds used to check if either + * of the truststore or keystore certificates file has changed and needs reloading. */ - public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000; + public static final int DEFAULT_SSL_STORES_RELOAD_INTERVAL = 10000; private Configuration conf; private KeyManager[] keyManagers; private TrustManager[] trustManagers; private ReloadingX509TrustManager trustManager; + private Timer fileMonitoringTimer; + + + private void createTrustManagersFromConfiguration(SSLFactory.Mode mode, + String truststoreType, + String truststoreLocation, + long storesReloadInterval) + throws IOException, GeneralSecurityException { + String passwordProperty = resolvePropertyName(mode, + SSL_TRUSTSTORE_PASSWORD_TPL_KEY); + String truststorePassword = getPassword(conf, passwordProperty, ""); + if (truststorePassword.isEmpty()) { + // An empty trust store password is legal; the trust store password + // is only required when writing to a trust store. Otherwise it's + // an optional integrity check. + truststorePassword = null; + } + + // Check if obsolete truststore specific reload interval is present for backward compatible + long truststoreReloadInterval = + conf.getLong( + resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), + storesReloadInterval); + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation + + ", reloading at " + truststoreReloadInterval + " millis."); + } + + trustManager = new ReloadingX509TrustManager( + truststoreType, + truststoreLocation, + truststorePassword); + + if (truststoreReloadInterval > 0) { + fileMonitoringTimer.schedule( + new FileMonitoringTimerTask( + Paths.get(truststoreLocation), + path -> trustManager.loadFrom(path), + exception -> LOG.error(ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE, exception)), + truststoreReloadInterval, + truststoreReloadInterval); + } + + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); + } + trustManagers = new TrustManager[]{trustManager}; + } + + /** + * Implements logic of initializing the KeyManagers with the options + * to reload keystores. + * @param mode client or server + * @param keystoreType The keystore type. + * @param storesReloadInterval The interval to check if the keystore certificates + * file has changed. 
+ */ + private void createKeyManagersFromConfiguration(SSLFactory.Mode mode, + String keystoreType, long storesReloadInterval) + throws GeneralSecurityException, IOException { + String locationProperty = + resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); + String keystoreLocation = conf.get(locationProperty, ""); + if (keystoreLocation.isEmpty()) { + throw new GeneralSecurityException("The property '" + locationProperty + + "' has not been set in the ssl configuration file."); + } + String passwordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); + String keystorePassword = getPassword(conf, passwordProperty, ""); + if (keystorePassword.isEmpty()) { + throw new GeneralSecurityException("The property '" + passwordProperty + + "' has not been set in the ssl configuration file."); + } + String keyPasswordProperty = + resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); + // Key password defaults to the same value as store password for + // compatibility with legacy configurations that did not use a separate + // configuration property for key password. + String keystoreKeyPassword = getPassword( + conf, keyPasswordProperty, keystorePassword); + if (LOG.isDebugEnabled()) { + LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); + } + + ReloadingX509KeystoreManager keystoreManager = new ReloadingX509KeystoreManager( + keystoreType, + keystoreLocation, + keystorePassword, + keystoreKeyPassword); + + if (storesReloadInterval > 0) { + fileMonitoringTimer.schedule( + new FileMonitoringTimerTask( + Paths.get(keystoreLocation), + path -> keystoreManager.loadFrom(path), + exception -> LOG.error(ReloadingX509KeystoreManager.RELOAD_ERROR_MESSAGE, exception)), + storesReloadInterval, + storesReloadInterval); + } + + keyManagers = new KeyManager[] { keystoreManager }; + } /** * Resolves a property name to its client/server version if applicable. @@ -139,56 +257,28 @@ public void init(SSLFactory.Mode mode) conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, SSLFactory.SSL_REQUIRE_CLIENT_CERT_DEFAULT); + long storesReloadInterval = conf.getLong( + resolvePropertyName(mode, SSL_STORES_RELOAD_INTERVAL_TPL_KEY), + DEFAULT_SSL_STORES_RELOAD_INTERVAL); + + fileMonitoringTimer = new Timer(SSL_MONITORING_THREAD_NAME, true); + // certificate store String keystoreType = - conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), - DEFAULT_KEYSTORE_TYPE); - KeyStore keystore = KeyStore.getInstance(keystoreType); - String keystoreKeyPassword = null; - if (requireClientCert || mode == SSLFactory.Mode.SERVER) { - String locationProperty = - resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY); - String keystoreLocation = conf.get(locationProperty, ""); - if (keystoreLocation.isEmpty()) { - throw new GeneralSecurityException("The property '" + locationProperty + - "' has not been set in the ssl configuration file."); - } - String passwordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY); - String keystorePassword = getPassword(conf, passwordProperty, ""); - if (keystorePassword.isEmpty()) { - throw new GeneralSecurityException("The property '" + passwordProperty + - "' has not been set in the ssl configuration file."); - } - String keyPasswordProperty = - resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY); - // Key password defaults to the same value as store password for - // compatibility with legacy configurations that did not use a separate - // configuration property for key password. 
- keystoreKeyPassword = getPassword( - conf, keyPasswordProperty, keystorePassword); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation); - } + conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY), + DEFAULT_KEYSTORE_TYPE); - InputStream is = Files.newInputStream(Paths.get(keystoreLocation)); - try { - keystore.load(is, keystorePassword.toCharArray()); - } finally { - is.close(); - } - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation); - } + if (requireClientCert || mode == SSLFactory.Mode.SERVER) { + createKeyManagersFromConfiguration(mode, keystoreType, storesReloadInterval); } else { + KeyStore keystore = KeyStore.getInstance(keystoreType); keystore.load(null, null); + KeyManagerFactory keyMgrFactory = KeyManagerFactory + .getInstance(SSLFactory.SSLCERTIFICATE); + + keyMgrFactory.init(keystore, null); + keyManagers = keyMgrFactory.getKeyManagers(); } - KeyManagerFactory keyMgrFactory = KeyManagerFactory - .getInstance(SSLFactory.SSLCERTIFICATE); - - keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ? - keystoreKeyPassword.toCharArray() : null); - keyManagers = keyMgrFactory.getKeyManagers(); //trust store String truststoreType = @@ -199,33 +289,7 @@ public void init(SSLFactory.Mode mode) resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY); String truststoreLocation = conf.get(locationProperty, ""); if (!truststoreLocation.isEmpty()) { - String passwordProperty = resolvePropertyName(mode, - SSL_TRUSTSTORE_PASSWORD_TPL_KEY); - String truststorePassword = getPassword(conf, passwordProperty, ""); - if (truststorePassword.isEmpty()) { - // An empty trust store password is legal; the trust store password - // is only required when writing to a trust store. Otherwise it's - // an optional integrity check. - truststorePassword = null; - } - long truststoreReloadInterval = - conf.getLong( - resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), - DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL); - - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation); - } - - trustManager = new ReloadingX509TrustManager(truststoreType, - truststoreLocation, - truststorePassword, - truststoreReloadInterval); - trustManager.init(); - if (LOG.isDebugEnabled()) { - LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); - } - trustManagers = new TrustManager[]{trustManager}; + createTrustManagersFromConfiguration(mode, truststoreType, truststoreLocation, storesReloadInterval); } else { if (LOG.isDebugEnabled()) { LOG.debug("The property '" + locationProperty + "' has not been set, " + @@ -256,7 +320,7 @@ String getPassword(Configuration conf, String alias, String defaultPass) { @Override public synchronized void destroy() { if (trustManager != null) { - trustManager.destroy(); + fileMonitoringTimer.cancel(); trustManager = null; keyManagers = null; trustManagers = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java new file mode 100644 index 0000000000000..40b61978ef1f4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.file.Path; +import java.util.TimerTask; +import java.util.function.Consumer; + +/** + * Implements basic logic to track when a file changes on disk and call the action + * passed to the constructor when it does. An exception handler can optionally also be specified + * in the constructor, otherwise any exception occurring during process will be logged + * using this class' logger. + */ +@InterfaceAudience.Private +public class FileMonitoringTimerTask extends TimerTask { + + static final Logger LOG = LoggerFactory.getLogger(FileMonitoringTimerTask.class); + + @VisibleForTesting + static final String PROCESS_ERROR_MESSAGE = + "Could not process file change : "; + + final private Path filePath; + final private Consumer onFileChange; + final Consumer onChangeFailure; + private long lastProcessed; + + /** + * Create file monitoring task to be scheduled using a standard Java {@link java.util.Timer} + * instance. + * + * @param filePath The path to the file to monitor. + * @param onFileChange The function to call when the file has changed. + * @param onChangeFailure The function to call when an exception is thrown during the + * file change processing. 
+ */ + public FileMonitoringTimerTask(Path filePath, Consumer onFileChange, + Consumer onChangeFailure) { + Preconditions.checkNotNull(filePath, "path to monitor disk file is not set"); + Preconditions.checkNotNull(onFileChange, "action to monitor disk file is not set"); + + this.filePath = filePath; + this.lastProcessed = filePath.toFile().lastModified(); + this.onFileChange = onFileChange; + this.onChangeFailure = onChangeFailure; + } + + @Override + public void run() { + if (lastProcessed != filePath.toFile().lastModified()) { + try { + onFileChange.accept(filePath); + } catch (Throwable t) { + if (onChangeFailure != null) { + onChangeFailure.accept(t); + } else { + LOG.error(PROCESS_ERROR_MESSAGE + filePath.toString(), t); + } + } + lastProcessed = filePath.toFile().lastModified(); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java new file mode 100644 index 0000000000000..72e8b6b63b420 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.*; +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.util.concurrent.atomic.AtomicReference; + +/** + * An implementation of X509KeyManager that exposes a method, + * {@link #loadFrom(Path)} to reload its configuration. Note that it is necessary + * to implement the X509ExtendedKeyManager to properly delegate + * the additional methods, otherwise the SSL handshake will fail. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ReloadingX509KeystoreManager extends X509ExtendedKeyManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); + + static final String RELOAD_ERROR_MESSAGE = + "Could not load keystore (keep using existing one) : "; + + final private String type; + final private String storePassword; + final private String keyPassword; + private AtomicReference keyManagerRef; + + /** + * Construct a Reloading509KeystoreManager + * + * @param type type of keystore file, typically 'jks'. + * @param location local path to the keystore file. + * @param storePassword password of the keystore file. + * @param keyPassword The password of the key. + * @throws IOException + * @throws GeneralSecurityException + */ + public ReloadingX509KeystoreManager(String type, String location, + String storePassword, String keyPassword) + throws IOException, GeneralSecurityException { + this.type = type; + this.storePassword = storePassword; + this.keyPassword = keyPassword; + keyManagerRef = new AtomicReference(); + keyManagerRef.set(loadKeyManager(Paths.get(location))); + } + + @Override + public String chooseEngineClientAlias(String[] strings, Principal[] principals, + SSLEngine sslEngine) { + return keyManagerRef.get().chooseEngineClientAlias(strings, principals, sslEngine); + } + + @Override + public String chooseEngineServerAlias(String s, Principal[] principals, + SSLEngine sslEngine) { + return keyManagerRef.get().chooseEngineServerAlias(s, principals, sslEngine); + } + + @Override + public String[] getClientAliases(String s, Principal[] principals) { + return keyManagerRef.get().getClientAliases(s, principals); + } + + @Override + public String chooseClientAlias(String[] strings, Principal[] principals, + Socket socket) { + return keyManagerRef.get().chooseClientAlias(strings, principals, socket); + } + + @Override + public String[] getServerAliases(String s, Principal[] principals) { + return keyManagerRef.get().getServerAliases(s, principals); + } + + @Override + public String chooseServerAlias(String s, Principal[] principals, + Socket socket) { + return keyManagerRef.get().chooseServerAlias(s, principals, socket); + } + + @Override + public X509Certificate[] getCertificateChain(String s) { + return keyManagerRef.get().getCertificateChain(s); + } + + @Override + public PrivateKey getPrivateKey(String s) { + return keyManagerRef.get().getPrivateKey(s); + } + + public ReloadingX509KeystoreManager loadFrom(Path path) { + try { + this.keyManagerRef.set(loadKeyManager(path)); + } catch (Exception ex) { + // The Consumer.accept interface forces us to convert to unchecked + throw new RuntimeException(ex); + } + return this; + } + + private X509ExtendedKeyManager loadKeyManager(Path path) + throws IOException, GeneralSecurityException { + + X509ExtendedKeyManager keyManager = null; + KeyStore keystore = KeyStore.getInstance(type); + + try (InputStream is = Files.newInputStream(path)) { + keystore.load(is, this.storePassword.toCharArray()); + } + + LOG.debug(" Loaded KeyStore: " + path.toFile().getAbsolutePath()); + + KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( + SSLFactory.SSLCERTIFICATE); + keyMgrFactory.init(keystore, + (keyPassword != null) ? 
keyPassword.toCharArray() : null); + for (KeyManager candidate: keyMgrFactory.getKeyManagers()) { + if (candidate instanceof X509ExtendedKeyManager) { + keyManager = (X509ExtendedKeyManager)candidate; + break; + } + } + return keyManager; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index 7430477932292..68fd4c161005c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -32,6 +32,8 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.cert.CertificateException; @@ -39,31 +41,23 @@ import java.util.concurrent.atomic.AtomicReference; /** - * A {@link TrustManager} implementation that reloads its configuration when - * the truststore file on disk changes. + * A {@link TrustManager} implementation that exposes a method, {@link #loadFrom(Path)} + * to reload its configuration for example when the truststore file on disk changes. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public final class ReloadingX509TrustManager - implements X509TrustManager, Runnable { +public final class ReloadingX509TrustManager implements X509TrustManager { - @VisibleForTesting static final Logger LOG = LoggerFactory.getLogger(ReloadingX509TrustManager.class); - @VisibleForTesting + static final String RELOAD_ERROR_MESSAGE = "Could not load truststore (keep using existing one) : "; private String type; - private File file; private String password; - private long lastLoaded; - private long reloadInterval; private AtomicReference trustManagerRef; - private volatile boolean running; - private Thread reloader; - /** * Creates a reloadable trustmanager. The trustmanager reloads itself * if the underlying trustore file has changed. @@ -71,49 +65,18 @@ public final class ReloadingX509TrustManager * @param type type of truststore file, typically 'jks'. * @param location local path to the truststore file. * @param password password of the truststore file. - * @param reloadInterval interval to check if the truststore file has * changed, in milliseconds. * @throws IOException thrown if the truststore could not be initialized due * to an IO error. * @throws GeneralSecurityException thrown if the truststore could not be * initialized due to a security error. */ - public ReloadingX509TrustManager(String type, String location, - String password, long reloadInterval) + public ReloadingX509TrustManager(String type, String location, String password) throws IOException, GeneralSecurityException { this.type = type; - file = new File(location); this.password = password; trustManagerRef = new AtomicReference(); - trustManagerRef.set(loadTrustManager()); - this.reloadInterval = reloadInterval; - } - - /** - * Starts the reloader thread. - */ - public void init() { - reloader = new Thread(this, "Truststore reloader thread"); - reloader.setDaemon(true); - running = true; - reloader.start(); - } - - /** - * Stops the reloader thread. - */ - public void destroy() { - running = false; - reloader.interrupt(); - } - - /** - * Returns the reload check interval. 
- * - * @return the reload check interval, in milliseconds. - */ - public long getReloadInterval() { - return reloadInterval; + trustManagerRef.set(loadTrustManager(Paths.get(location))); } @Override @@ -151,27 +114,24 @@ public X509Certificate[] getAcceptedIssuers() { return issuers; } - boolean needsReload() { - boolean reload = true; - if (file.exists()) { - if (file.lastModified() == lastLoaded) { - reload = false; - } - } else { - lastLoaded = 0; + public ReloadingX509TrustManager loadFrom(Path path) { + try { + this.trustManagerRef.set(loadTrustManager(path)); + } catch (Exception ex) { + // The Consumer.accept interface forces us to convert to unchecked + throw new RuntimeException(RELOAD_ERROR_MESSAGE, ex); } - return reload; + return this; } - X509TrustManager loadTrustManager() + X509TrustManager loadTrustManager(Path path) throws IOException, GeneralSecurityException { X509TrustManager trustManager = null; KeyStore ks = KeyStore.getInstance(type); - InputStream in = Files.newInputStream(file.toPath()); + InputStream in = Files.newInputStream(path); try { ks.load(in, (password == null) ? null : password.toCharArray()); - lastLoaded = file.lastModified(); - LOG.debug("Loaded truststore '" + file + "'"); + LOG.debug("Loaded truststore '" + path + "'"); } finally { in.close(); } @@ -188,23 +148,4 @@ X509TrustManager loadTrustManager() } return trustManager; } - - @Override - public void run() { - while (running) { - try { - Thread.sleep(reloadInterval); - } catch (InterruptedException e) { - //NOP - } - if (running && needsReload()) { - try { - trustManagerRef.set(loadTrustManager()); - } catch (Exception ex) { - LOG.warn(RELOAD_ERROR_MESSAGE + ex.toString(), ex); - } - } - } - } - } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java new file mode 100644 index 0000000000000..bf0a6abdc676d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.security.ssl; + +import org.apache.hadoop.thirdparty.com.google.common.base.Supplier; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Paths; +import java.security.KeyPair; +import java.security.cert.X509Certificate; +import java.util.Timer; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +public class TestReloadingX509KeyManager { + + private static final String BASEDIR = GenericTestUtils.getTempPath( + TestReloadingX509TrustManager.class.getSimpleName()); + + private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( + FileMonitoringTimerTask.LOG); + + @BeforeClass + public static void setUp() throws Exception { + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + } + + @Test(expected = IOException.class) + public void testLoadMissingKeyStore() throws Exception { + String keystoreLocation = BASEDIR + "/testmissing.jks"; + + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + } + + @Test(expected = IOException.class) + public void testLoadCorruptKeyStore() throws Exception { + String keystoreLocation = BASEDIR + "/testcorrupt.jks"; + OutputStream os = new FileOutputStream(keystoreLocation); + os.write(1); + os.close(); + + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + } + + @Test (timeout = 3000000) + public void testReload() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate sCert = generateCertificate("CN=localhost, O=server", kp, 30, + "SHA1withRSA"); + String keystoreLocation = BASEDIR + "/testreload.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), sCert); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + // Wait so that the file modification time is different + Thread.sleep((reloadInterval+ 1000)); + + // Change the certificate with a new keypair + final KeyPair anotherKP = generateKeyPair("RSA"); + sCert = KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", anotherKP, 30, + "SHA1withRSA"); + createKeyStore(keystoreLocation, "password", "cert1", anotherKP.getPrivate(), sCert); + + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return tm.getPrivateKey("cert1").equals(kp.getPrivate()); + } + }, (int) reloadInterval, 100000); + } finally { + fileMonitoringTimer.cancel(); + } + } + + @Test (timeout = 30000) + public void testReloadMissingTrustStore() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); + String keystoreLocation = BASEDIR + 
"/testmissing.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + assertFalse(reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + + // Wait for the first reload to happen so we actually detect a change after the delete + Thread.sleep((reloadInterval+ 1000)); + + new File(keystoreLocation).delete(); + + // Wait for the reload to happen and log to get written to + Thread.sleep((reloadInterval+ 1000)); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); + + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + } finally { + reloaderLog.stopCapturing(); + fileMonitoringTimer.cancel(); + } + } + + + @Test (timeout = 30000) + public void testReloadCorruptTrustStore() throws Exception { + KeyPair kp = generateKeyPair("RSA"); + X509Certificate cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA"); + String keystoreLocation = BASEDIR + "/testmissing.jks"; + createKeyStore(keystoreLocation, "password", "cert1", kp.getPrivate(), cert1); + + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ReloadingX509KeystoreManager tm = + new ReloadingX509KeystoreManager("jks", keystoreLocation, + "password", + "password"); + try { + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(keystoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + + // Wait so that the file modification time is different + Thread.sleep((reloadInterval + 1000)); + + assertFalse(reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + OutputStream os = new FileOutputStream(keystoreLocation); + os.write(1); + os.close(); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); + + assertEquals(kp.getPrivate(), tm.getPrivateKey("cert1")); + } finally { + reloaderLog.stopCapturing(); + fileMonitoringTimer.cancel(); + } + } + + /**Wait for the reloader thread to load the configurations at least once + * by probing the log of the thread if the reload fails. 
+ */ + private void waitForFailedReloadAtLeastOnce(int reloadInterval) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return reloaderLog.getOutput().contains( + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); + } + }, reloadInterval, 10 * 1000); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 441f552649298..63589592f35dd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -30,10 +30,12 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; +import java.nio.file.Paths; import java.security.KeyPair; import java.security.cert.X509Certificate; import java.util.HashMap; import java.util.Map; +import java.util.Timer; import java.util.concurrent.TimeoutException; import static org.junit.Assert.assertEquals; @@ -50,7 +52,7 @@ public class TestReloadingX509TrustManager { private X509Certificate cert1; private X509Certificate cert2; private final LogCapturer reloaderLog = LogCapturer.captureLogs( - ReloadingX509TrustManager.LOG); + FileMonitoringTimerTask.LOG); @BeforeClass public static void setUp() throws Exception { @@ -64,12 +66,7 @@ public void testLoadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); - try { - tm.init(); - } finally { - tm.destroy(); - } + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); } @Test(expected = IOException.class) @@ -80,12 +77,7 @@ public void testLoadCorruptTrustStore() throws Exception { os.close(); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); - try { - tm.init(); - } finally { - tm.destroy(); - } + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); } @Test (timeout = 30000) @@ -96,14 +88,17 @@ public void testReload() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); // Wait so that the file modification time is different - Thread.sleep((tm.getReloadInterval() + 1000)); + Thread.sleep((reloadInterval+ 1000)); // Add another cert Map certs = new HashMap(); @@ -116,9 +111,9 @@ public void testReload() throws Exception { public Boolean get() { return tm.getAcceptedIssuers().length == 2; } - }, (int) tm.getReloadInterval(), 10000); + }, (int) reloadInterval, 100000); } finally { - tm.destroy(); + fileMonitoringTimer.cancel(); } } @@ -130,27 +125,38 @@ public void 
testReloadMissingTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testmissing.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); X509Certificate cert = tm.getAcceptedIssuers()[0]; assertFalse(reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); + + // Wait for the first reload to happen so we actually detect a change after the delete + Thread.sleep((reloadInterval+ 1000)); + new File(truststoreLocation).delete(); - waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); + // Wait for the reload to happen and log to get written to + Thread.sleep((reloadInterval+ 1000)); + + waitForFailedReloadAtLeastOnce((int) reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - tm.destroy(); + fileMonitoringTimer.cancel(); } } + @Test (timeout = 30000) public void testReloadCorruptTrustStore() throws Exception { KeyPair kp = generateKeyPair("RSA"); @@ -159,29 +165,32 @@ public void testReloadCorruptTrustStore() throws Exception { String truststoreLocation = BASEDIR + "/testcorrupt.jks"; createTrustStore(truststoreLocation, "password", "cert1", cert1); + long reloadInterval = 10; + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10); + new ReloadingX509TrustManager("jks", truststoreLocation, "password"); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); final X509Certificate cert = tm.getAcceptedIssuers()[0]; // Wait so that the file modification time is different - Thread.sleep((tm.getReloadInterval() + 1000)); + Thread.sleep((reloadInterval + 1000)); assertFalse(reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE)); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE)); OutputStream os = new FileOutputStream(truststoreLocation); os.write(1); os.close(); - waitForFailedReloadAtLeastOnce((int) tm.getReloadInterval()); + waitForFailedReloadAtLeastOnce((int) reloadInterval); assertEquals(1, tm.getAcceptedIssuers().length); assertEquals(cert, tm.getAcceptedIssuers()[0]); } finally { reloaderLog.stopCapturing(); - tm.destroy(); + fileMonitoringTimer.cancel(); } } @@ -194,7 +203,7 @@ private void waitForFailedReloadAtLeastOnce(int reloadInterval) @Override public Boolean get() { return reloaderLog.getOutput().contains( - ReloadingX509TrustManager.RELOAD_ERROR_MESSAGE); + FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE); } }, reloadInterval, 10 * 1000); } @@ -208,13 +217,15 @@ public void testNoPassword() throws Exception { String truststoreLocation = BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation, "password", 
"cert1", cert1); + Timer fileMonitoringTimer = new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); final ReloadingX509TrustManager tm = - new ReloadingX509TrustManager("jks", truststoreLocation, null, 10); + new ReloadingX509TrustManager("jks", truststoreLocation, null); try { - tm.init(); + fileMonitoringTimer.schedule(new FileMonitoringTimerTask( + Paths.get(truststoreLocation), tm::loadFrom,null), 10, 10); assertEquals(1, tm.getAcceptedIssuers().length); } finally { - tm.destroy(); + fileMonitoringTimer.cancel(); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index 108ce50420640..eebe0baaf2356 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; +import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; @@ -99,7 +100,7 @@ public void testSSLFactoryCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains("Truststore reloader thread"))) { + && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java index 9f9564a310641..4d9c32067339b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClient.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; +import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME; import org.apache.hadoop.test.TestGenericTestUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; @@ -476,7 +477,7 @@ public void testTimelineClientCleanup() throws Exception { Thread reloaderThread = null; for (Thread thread : threads) { if ((thread.getName() != null) - && (thread.getName().contains("Truststore reloader thread"))) { + && (thread.getName().contains(SSL_MONITORING_THREAD_NAME))) { reloaderThread = thread; } } diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index dff9a2b08a9f9..caa9a1b6c0bc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -11,7 +11,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. ---> +--> + 4.0.0 org.apache.hadoop From 294ccbc792471babbd0009aaecbd8b7c1fc427ad Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 1 Apr 2021 04:24:26 +0900 Subject: [PATCH 178/666] Revert "HADOOP-17608. Fix NPE in TestKMS (#2828)" This reverts commit d69088a097ff6f6bb25203bdc8ac5b71f8243c97. --- .../hadoop/crypto/key/kms/server/TestKMS.java | 37 +++++++++---------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index dbe685b6a3d8b..3d59e6f5be7b7 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -18,8 +18,6 @@ package org.apache.hadoop.crypto.key.kms.server; import java.util.function.Supplier; - -import org.apache.commons.lang3.ThreadUtils; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; @@ -527,7 +525,6 @@ public void testStartStop(final boolean ssl, final boolean kerberos) if (ssl) { sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); try { - // the first reloader thread is created here sslFactory.init(); } catch (GeneralSecurityException ex) { throw new IOException(ex); @@ -544,29 +541,31 @@ public Void call() throws Exception { final URI uri = createKMSUri(getKMSUrl()); if (ssl) { - // the second reloader thread is created here KeyProvider testKp = createProvider(uri, conf); - Collection reloaderThreads = - ThreadUtils.findThreadsByName(SSL_RELOADER_THREAD_NAME); - // now there are two active reloader threads - assertEquals(2, reloaderThreads.size()); - // Explicitly close the provider so we can verify - // the second reloader thread is shutdown + ThreadGroup threadGroup = Thread.currentThread().getThreadGroup(); + while (threadGroup.getParent() != null) { + threadGroup = threadGroup.getParent(); + } + Thread[] threads = new Thread[threadGroup.activeCount()]; + threadGroup.enumerate(threads); + Thread reloaderThread = null; + for (Thread thread : threads) { + if ((thread.getName() != null) + && (thread.getName().contains(SSL_RELOADER_THREAD_NAME))) { + reloaderThread = thread; + } + } + Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive()); + // Explicitly close the provider so we can verify the internal thread + // is shutdown testKp.close(); boolean reloaderStillAlive = true; for (int i = 0; i < 10; i++) { - for (Thread thread : reloaderThreads) { - if (!thread.isAlive()) { - reloaderStillAlive = false; - break; - } - } + reloaderStillAlive = reloaderThread.isAlive(); + if (!reloaderStillAlive) break; Thread.sleep(1000); } Assert.assertFalse("Reloader is still alive", reloaderStillAlive); - reloaderThreads = - ThreadUtils.findThreadsByName(SSL_RELOADER_THREAD_NAME); - assertEquals(1, reloaderThreads.size()); } if (kerberos) { From e0adb51a5eae4456781f5295910a961daae48776 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Thu, 1 Apr 2021 11:32:03 +0800 Subject: [PATCH 179/666] HDFS-15921. Improve the log for the Storage Policy Operations. Contributed by Bhavik Patel. 
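For context, the debug statements added below fire on the NameNode whenever a client sets, unsets, or queries a storage policy. The following minimal client sketch is for illustration only (the "/data/cold" path and "COLD" policy name are made-up examples, and it assumes fs.defaultFS points at an HDFS cluster); it exercises the NameNodeRpcServer paths touched by this change:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class StoragePolicyLogExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Goes through NameNodeRpcServer.setStoragePolicy(), which now logs
          // the path and policy name at DEBUG on the state-change logger.
          dfs.setStoragePolicy(new Path("/data/cold"), "COLD");
          // Goes through NameNodeRpcServer.getStoragePolicy(), which now logs
          // the path at DEBUG.
          System.out.println(dfs.getStoragePolicy(new Path("/data/cold")));
        }
      }
    }

The new messages are emitted only when DEBUG is enabled for the NameNode state-change logger and for FSDirectory.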
--- .../org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java | 4 ++++ .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 164368d28d6ca..173348f356473 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -434,6 +434,8 @@ static void unprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm, } final int snapshotId = iip.getLatestSnapshotId(); if (inode.isFile()) { + FSDirectory.LOG.debug("DIR* FSDirAAr.unprotectedSetStoragePolicy for " + + "File."); if (policyId != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId); if (newPolicy.isCopyOnCreateFile()) { @@ -452,6 +454,8 @@ static void unprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm, } inode.asFile().setStoragePolicyID(policyId, snapshotId); } else if (inode.isDirectory()) { + FSDirectory.LOG.debug("DIR* FSDirAAr.unprotectedSetStoragePolicy for " + + "Directory."); setDirStoragePolicy(fsd, iip, policyId); } else { throw new FileNotFoundException(iip.getPath() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 0767a18b9ffed..aee4f68bdc7f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -854,6 +854,7 @@ public boolean setReplication(String src, short replication) public void unsetStoragePolicy(String src) throws IOException { checkNNStartup(); + stateChangeLog.debug("*DIR* NameNode.unsetStoragePolicy for path: {}", src); namesystem.unsetStoragePolicy(src); } @@ -861,12 +862,15 @@ public void unsetStoragePolicy(String src) public void setStoragePolicy(String src, String policyName) throws IOException { checkNNStartup(); + stateChangeLog.debug("*DIR* NameNode.setStoragePolicy for path: {}, " + + "policyName: {}", src, policyName); namesystem.setStoragePolicy(src, policyName); } @Override public BlockStoragePolicy getStoragePolicy(String path) throws IOException { checkNNStartup(); + stateChangeLog.debug("*DIR* NameNode.getStoragePolicy for path: {}", path); return namesystem.getStoragePolicy(path); } From 69a93ea49164e220227ab6698fc2a7f5c97914f7 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Thu, 1 Apr 2021 09:19:39 +0530 Subject: [PATCH 180/666] HDFS-15494. TestReplicaCachingGetSpaceUsed#testReplicaCachingGetSpaceUsedByRBWReplica Fails on Windows. Contributed by Ravuri Sushma sree. 
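The change below skips the RBW-replica case on Windows, where it cannot pass, instead of letting it fail, using the shared PlatformAssumptions helper. A minimal sketch of that guard pattern, for illustration only (the class and method names here are made up):

    import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;

    import org.junit.Test;

    public class ExamplePlatformGuardedTest {
      @Test
      public void testPosixOnlyBehaviour() throws Exception {
        // A failed JUnit assumption marks the test as skipped rather than
        // failed, so Windows runs report this case as skipped.
        assumeNotWindows();
        // ... platform-specific assertions would follow here ...
      }
    }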
--- .../fsdataset/impl/TestReplicaCachingGetSpaceUsed.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java index 6abf5238682d0..d4382d27fb228 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java @@ -43,6 +43,7 @@ import java.util.Set; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY; +import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import static org.junit.Assert.assertEquals; /** @@ -112,6 +113,8 @@ public void testReplicaCachingGetSpaceUsedByFINALIZEDReplica() @Test public void testReplicaCachingGetSpaceUsedByRBWReplica() throws Exception { + // This test cannot pass on Windows + assumeNotWindows(); FSDataOutputStream os = fs.create(new Path("/testReplicaCachingGetSpaceUsedByRBWReplica")); byte[] bytes = new byte[20480]; From c340cd9225b48bbb57cf9579549832aa652d71e7 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 1 Apr 2021 09:21:15 +0200 Subject: [PATCH 181/666] YARN-10720. YARN WebAppProxyServlet should support connection timeout to prevent proxy server from hanging. Contributed by Qi Zhu. --- .../hadoop/yarn/conf/YarnConfiguration.java | 14 ++++ .../src/main/resources/yarn-default.xml | 12 +++ .../server/webproxy/WebAppProxyServlet.java | 28 +++++-- .../webproxy/TestWebAppProxyServlet.java | 79 ++++++++++++++++++- 4 files changed, 126 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 2cf4a3bccbfab..2103ae77c2f64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2668,6 +2668,20 @@ public static boolean isAclEnabled(Configuration conf) { public static final String DEFAULT_RM_APPLICATION_HTTPS_POLICY = "NONE"; + + // If the proxy connection time enabled. + public static final String RM_PROXY_TIMEOUT_ENABLED = + RM_PREFIX + "proxy.timeout.enabled"; + + public static final boolean DEFALUT_RM_PROXY_TIMEOUT_ENABLED = + true; + + public static final String RM_PROXY_CONNECTION_TIMEOUT = + RM_PREFIX + "proxy.connection.timeout"; + + public static final int DEFAULT_RM_PROXY_CONNECTION_TIMEOUT = + 60000; + /** * Interval of time the linux container executor should try cleaning up * cgroups entry when cleaning up a container. This is required due to what diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 12fc32fb9ca50..c51cf8ac3fa5d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -2574,6 +2574,18 @@
+ + Enable the web proxy connection timeout, default is enabled. + yarn.resourcemanager.proxy.timeout.enabled + true + + + + The web proxy connection timeout. + yarn.resourcemanager.proxy.connection.timeout + 60000 + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java index 0b6bb65d8db34..03b7077bc16d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java @@ -122,6 +122,9 @@ public HTML html() { } } + protected void setConf(YarnConfiguration conf){ + this.conf = conf; + } /** * Default constructor */ @@ -230,6 +233,14 @@ private void proxyLink(final HttpServletRequest req, String httpsPolicy = conf.get(YarnConfiguration.RM_APPLICATION_HTTPS_POLICY, YarnConfiguration.DEFAULT_RM_APPLICATION_HTTPS_POLICY); + + boolean connectionTimeoutEnabled = + conf.getBoolean(YarnConfiguration.RM_PROXY_TIMEOUT_ENABLED, + YarnConfiguration.DEFALUT_RM_PROXY_TIMEOUT_ENABLED); + int connectionTimeout = + conf.getInt(YarnConfiguration.RM_PROXY_CONNECTION_TIMEOUT, + YarnConfiguration.DEFAULT_RM_PROXY_CONNECTION_TIMEOUT); + if (httpsPolicy.equals("LENIENT") || httpsPolicy.equals("STRICT")) { ProxyCA proxyCA = getProxyCA(); // ProxyCA could be null when the Proxy is run outside the RM @@ -250,10 +261,18 @@ private void proxyLink(final HttpServletRequest req, InetAddress localAddress = InetAddress.getByName(proxyHost); LOG.debug("local InetAddress for proxy host: {}", localAddress); httpClientBuilder.setDefaultRequestConfig( - RequestConfig.custom() - .setCircularRedirectsAllowed(true) - .setLocalAddress(localAddress) - .build()); + connectionTimeoutEnabled ? + RequestConfig.custom() + .setCircularRedirectsAllowed(true) + .setLocalAddress(localAddress) + .setConnectionRequestTimeout(connectionTimeout) + .setSocketTimeout(connectionTimeout) + .setConnectTimeout(connectionTimeout) + .build() : + RequestConfig.custom() + .setCircularRedirectsAllowed(true) + .setLocalAddress(localAddress) + .build()); HttpRequestBase base = null; if (method.equals(HTTP.GET)) { @@ -621,7 +640,6 @@ private FetchedAppReport getFetchedAppReport(ApplicationId id) * again... If this method returns true, there was a redirect, and * it was handled by redirecting the current request to an error page. 
* - * @param path the part of the request path after the app id * @param id the app id * @param req the request object * @param resp the response object diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java index f05e05a2d6374..6c8993f6e80b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java @@ -23,6 +23,8 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -35,10 +37,14 @@ import java.net.HttpURLConnection; import java.net.URI; import java.net.URL; +import java.net.SocketTimeoutException; +import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Map; +import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; @@ -98,6 +104,7 @@ public static void start() throws Exception { context.setContextPath("/foo"); server.setHandler(context); context.addServlet(new ServletHolder(TestServlet.class), "/bar"); + context.addServlet(new ServletHolder(TimeOutTestServlet.class), "/timeout"); ((ServerConnector)server.getConnectors()[0]).setHost("localhost"); server.start(); originalPort = ((ServerConnector)server.getConnectors()[0]).getLocalPort(); @@ -145,6 +152,29 @@ protected void doPost(HttpServletRequest req, HttpServletResponse resp) } } + @SuppressWarnings("serial") + public static class TimeOutTestServlet extends HttpServlet { + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { + try { + Thread.sleep(10 * 1000); + } catch (InterruptedException e) { + LOG.warn("doGet() interrupted", e); + resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); + return; + } + resp.setStatus(HttpServletResponse.SC_OK); + } + + @Override + protected void doPost(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { + resp.setStatus(HttpServletResponse.SC_OK); + } + } + @Test(timeout=5000) public void testWebAppProxyServlet() throws Exception { configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090"); @@ -256,6 +286,45 @@ public void testWebAppProxyServlet() throws Exception { } } + @Test(expected = SocketTimeoutException.class) + public void testWebAppProxyConnectionTimeout() + throws IOException, ServletException{ + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getMethod()).thenReturn("GET"); + when(request.getRemoteUser()).thenReturn("dr.who"); + when(request.getPathInfo()).thenReturn("/application_00_0"); + when(request.getHeaderNames()).thenReturn(Collections.emptyEnumeration()); + + HttpServletResponse response = mock(HttpServletResponse.class); + 
when(response.getOutputStream()).thenReturn(null); + + WebAppProxyServlet servlet = new WebAppProxyServlet(); + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.RM_PROXY_TIMEOUT_ENABLED, + true); + conf.setInt(YarnConfiguration.RM_PROXY_CONNECTION_TIMEOUT, + 1000); + + servlet.setConf(conf); + + ServletConfig config = mock(ServletConfig.class); + ServletContext context = mock(ServletContext.class); + when(config.getServletContext()).thenReturn(context); + + AppReportFetcherForTest appReportFetcher = + new AppReportFetcherForTest(new YarnConfiguration()); + + when(config.getServletContext() + .getAttribute(WebAppProxy.FETCHER_ATTRIBUTE)) + .thenReturn(appReportFetcher); + + appReportFetcher.answer = 7; + + servlet.init(config); + servlet.doGet(request, response); + + } + @Test(timeout=5000) public void testAppReportForEmptyTrackingUrl() throws Exception { configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090"); @@ -391,9 +460,9 @@ public void testWebAppProxyServerMainMethod() throws Exception { @Test(timeout=5000) public void testCheckHttpsStrictAndNotProvided() throws Exception { - HttpServletResponse resp = Mockito.mock(HttpServletResponse.class); + HttpServletResponse resp = mock(HttpServletResponse.class); StringWriter sw = new StringWriter(); - Mockito.when(resp.getWriter()).thenReturn(new PrintWriter(sw)); + when(resp.getWriter()).thenReturn(new PrintWriter(sw)); YarnConfiguration conf = new YarnConfiguration(); final URI httpLink = new URI("http://foo.com"); final URI httpsLink = new URI("https://foo.com"); @@ -566,6 +635,12 @@ public FetchedAppReport getApplicationReport(ApplicationId appId) return result; } else if (answer == 6) { return getDefaultApplicationReport(appId, false); + } else if (answer == 7) { + // test connection timeout + FetchedAppReport result = getDefaultApplicationReport(appId); + result.getApplicationReport().setOriginalTrackingUrl("localhost:" + + originalPort + "/foo/timeout?a=b#main"); + return result; } return null; } From 9d186362abc41d4ecc7e06c64534cfcff52f6c45 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 1 Apr 2021 11:39:08 +0200 Subject: [PATCH 182/666] YARN-9618. NodesListManager event improvement. Contributed by Qi Zhu. 
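For readability, the core of this change condensed from the hunk below (this sketch is not additional code in the patch; the names are the ones already used in NodesListManager): instead of queueing one RMAppNodeUpdateEvent per live application on the RM's central async dispatcher, the node update is handed straight to each RMApp.

    // Previously: one event per non-finalized app was queued on the async dispatcher.
    //   rmContext.getDispatcher().getEventHandler().handle(
    //       new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode, appNodeUpdateType));
    // Now: the app handles the update directly, avoiding the extra dispatcher hop.
    for (RMApp app : rmContext.getRMApps().values()) {
      if (!app.isAppFinalStateStored()) {
        app.handle(new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode,
            appNodeUpdateType));
      }
    }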
--- .../resourcemanager/NodesListManager.java | 8 +- .../rmapp/TestNodesListManager.java | 78 ++++++------------- 2 files changed, 26 insertions(+), 60 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java index f9e159168c5bd..07d78cb1c1c38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java @@ -509,12 +509,8 @@ private void sendRMAppNodeUpdateEventToNonFinalizedApps( RMNode eventNode, RMAppNodeUpdateType appNodeUpdateType) { for(RMApp app : rmContext.getRMApps().values()) { if (!app.isAppFinalStateStored()) { - this.rmContext - .getDispatcher() - .getEventHandler() - .handle( - new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode, - appNodeUpdateType)); + app.handle(new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode, + appNodeUpdateType)); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java index 0df295c943630..35148e9a450a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java @@ -22,10 +22,7 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.spy; -import java.util.ArrayList; - import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -52,8 +49,8 @@ import org.mockito.ArgumentMatcher; public class TestNodesListManager { - // To hold list of application for which event was received - ArrayList applist = new ArrayList(); + private boolean isRMAppEvent; + private boolean isNodesListEvent; @Test(timeout = 300000) public void testNodeUsableEvent() throws Exception { @@ -68,67 +65,32 @@ protected Dispatcher createDispatcher() { }; rm.start(); MockNM nm1 = rm.registerNode("h1:1234", 28000); - NodesListManager nodesListManager = rm.getNodesListManager(); Resource clusterResource = Resource.newInstance(28000, 8); RMNode rmnode = MockNodes.newNodeInfo(1, clusterResource); // Create killing APP - RMApp killrmApp = MockRMAppSubmitter.submitWithMemory(200, rm); - rm.killApp(killrmApp.getApplicationId()); - rm.waitForState(killrmApp.getApplicationId(), RMAppState.KILLED); + RMApp killRmApp = MockRMAppSubmitter.submitWithMemory(200, rm); + rm.killApp(killRmApp.getApplicationId()); + 
rm.waitForState(killRmApp.getApplicationId(), RMAppState.KILLED); // Create finish APP - RMApp finshrmApp = MockRMAppSubmitter.submitWithMemory(2000, rm); + RMApp finshRmApp = MockRMAppSubmitter.submitWithMemory(2000, rm); nm1.nodeHeartbeat(true); - RMAppAttempt attempt = finshrmApp.getCurrentAppAttempt(); + RMAppAttempt attempt = finshRmApp.getCurrentAppAttempt(); MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId()); am.registerAppAttempt(); am.unregisterAppAttempt(); nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE); rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED); - // Create submitted App - RMApp subrmApp = MockRMAppSubmitter.submitWithMemory(200, rm); - // Fire Event for NODE_USABLE - nodesListManager.handle(new NodesListManagerEvent( + // Should not have RMAppNodeUpdateEvent to AsyncDispatcher. + dispatcher.getEventHandler().handle(new NodesListManagerEvent( NodesListManagerEventType.NODE_USABLE, rmnode)); - if (applist.size() > 0) { - Assert.assertTrue( - "Event based on running app expected " + subrmApp.getApplicationId(), - applist.contains(subrmApp.getApplicationId())); - Assert.assertFalse( - "Event based on finish app not expected " - + finshrmApp.getApplicationId(), - applist.contains(finshrmApp.getApplicationId())); - Assert.assertFalse( - "Event based on killed app not expected " - + killrmApp.getApplicationId(), - applist.contains(killrmApp.getApplicationId())); - } else { - Assert.fail("Events received should have beeen more than 1"); - } - applist.clear(); - - // Fire Event for NODE_UNUSABLE - nodesListManager.handle(new NodesListManagerEvent( - NodesListManagerEventType.NODE_UNUSABLE, rmnode)); - if (applist.size() > 0) { - Assert.assertTrue( - "Event based on running app expected " + subrmApp.getApplicationId(), - applist.contains(subrmApp.getApplicationId())); - Assert.assertFalse( - "Event based on finish app not expected " - + finshrmApp.getApplicationId(), - applist.contains(finshrmApp.getApplicationId())); - Assert.assertFalse( - "Event based on killed app not expected " - + killrmApp.getApplicationId(), - applist.contains(killrmApp.getApplicationId())); - } else { - Assert.fail("Events received should have beeen more than 1"); - } - + Assert.assertFalse("Got unexpected RM app event", + getIsRMAppEvent()); + Assert.assertTrue("Received no NodesListManagerEvent", + getIsNodesListEvent()); } @Test @@ -241,9 +203,10 @@ class EventArgMatcher implements ArgumentMatcher { @Override public boolean matches(AbstractEvent argument) { if (argument instanceof RMAppNodeUpdateEvent) { - ApplicationId appid = - ((RMAppNodeUpdateEvent) argument).getApplicationId(); - applist.add(appid); + isRMAppEvent = true; + } + if (argument instanceof NodesListManagerEvent) { + isNodesListEvent = true; } return false; } @@ -256,4 +219,11 @@ public boolean matches(AbstractEvent argument) { }; } + public boolean getIsNodesListEvent() { + return isNodesListEvent; + } + + public boolean getIsRMAppEvent() { + return isRMAppEvent; + } } From b4178dc2ac6d43800217a4c9227a31a5195cfaaf Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 1 Apr 2021 16:09:52 +0200 Subject: [PATCH 183/666] YARN-10726. Log the size of DelegationTokenRenewer event queue in case of too many pending events. Contributed by Qi Zhu. 
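The hunk below throttles the new log line so a busy renewer does not flood the log: the queue size is reported only when it is a non-zero multiple of 1000 and differs from the last size reported. A minimal sketch of that pattern in isolation (the helper method name is illustrative and not part of the patch):

    private volatile int lastEventQueueSizeLogged = 0;

    private void maybeLogQueueSize(int qSize) {
      // Log every 1000 pending events, and only once per distinct size.
      if (qSize != 0 && qSize % 1000 == 0 && lastEventQueueSizeLogged != qSize) {
        lastEventQueueSizeLogged = qSize;
        LOG.info("Size of pending DelegationTokenRenewerEvent queue is " + qSize);
      }
    }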
--- .../resourcemanager/security/DelegationTokenRenewer.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index b4da08f52fea9..d8368eb64828b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -89,6 +89,8 @@ public class DelegationTokenRenewer extends AbstractService { new Text("HDFS_DELEGATION_TOKEN"); public static final String SCHEME = "hdfs"; + private volatile int lastEventQueueSizeLogged = 0; + // global single timer (daemon) private Timer renewalTimer; private RMContext rmContext; @@ -228,6 +230,13 @@ private void processDelegationTokenRenewerEvent( futures.put(evt, future); } else { pendingEventQueue.add(evt); + int qSize = pendingEventQueue.size(); + if (qSize != 0 && qSize % 1000 == 0 + && lastEventQueueSizeLogged != qSize) { + lastEventQueueSizeLogged = qSize; + LOG.info("Size of pending " + + "DelegationTokenRenewerEvent queue is " + qSize); + } } } finally { serviceStateLock.readLock().unlock(); From 594d1448d33a94296a2ccdc43e8e8b13ea6d324a Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 2 Apr 2021 05:04:31 +0530 Subject: [PATCH 184/666] HDFS-15931 : Fix non-static inner classes for better memory management (#2830). Contributed by Viraj Jasani Signed-off-by: Mingliang Liu --- .../federation/MiniRouterDFSCluster.java | 2 +- .../impl/InMemoryLevelDBAliasMapClient.java | 2 +- .../impl/RamDiskReplicaLruTracker.java | 4 ++-- .../server/namenode/ReencryptionHandler.java | 2 +- .../apache/hadoop/hdfs/tools/DebugAdmin.java | 20 +++++++++---------- .../apache/hadoop/hdfs/MiniDFSCluster.java | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 0c9a2e0046c0b..896d08f2c49b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -152,7 +152,7 @@ public class MiniRouterDFSCluster { /** * Router context. 
*/ - public class RouterContext { + public static class RouterContext { private Router router; private FileContext fileContext; private String nameserviceId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java index cacf8f102fa40..6cac72af82ca3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java @@ -129,7 +129,7 @@ public Iterator iterator() { } } - class LevelDbWriter extends BlockAliasMap.Writer { + static class LevelDbWriter extends BlockAliasMap.Writer { private InMemoryAliasMapProtocol aliasMap; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java index 31e9ebe0b8c2e..aebedaab0ef8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java @@ -35,7 +35,7 @@ @InterfaceStability.Unstable public class RamDiskReplicaLruTracker extends RamDiskReplicaTracker { - private class RamDiskReplicaLru extends RamDiskReplica { + private static class RamDiskReplicaLru extends RamDiskReplica { long lastUsedTime; private RamDiskReplicaLru(String bpid, long blockId, @@ -88,7 +88,7 @@ synchronized void addReplica(final String bpid, final long blockId, } RamDiskReplicaLru ramDiskReplicaLru = new RamDiskReplicaLru(bpid, blockId, transientVolume, - lockedBytesReserved); + lockedBytesReserved); map.put(blockId, ramDiskReplicaLru); replicasNotPersisted.add(ramDiskReplicaLru); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java index ea38da6021ce8..b1c5928f8575d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java @@ -835,7 +835,7 @@ protected void readUnlock() { } } - private class ZoneTraverseInfo extends TraverseInfo { + private static class ZoneTraverseInfo extends TraverseInfo { private String ezKeyVerName; ZoneTraverseInfo(String ezKeyVerName) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java index 89389a094686a..1784ea2accf6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java @@ -65,7 +65,7 @@ public class DebugAdmin extends Configured implements Tool { /** * All the debug commands we can run. 
*/ - private DebugCommand DEBUG_COMMANDS[] = { + private final DebugCommand[] DEBUG_COMMANDS = { new VerifyMetaCommand(), new ComputeMetaCommand(), new RecoverLeaseCommand(), @@ -75,7 +75,7 @@ public class DebugAdmin extends Configured implements Tool { /** * The base class for debug commands. */ - private abstract class DebugCommand { + private abstract static class DebugCommand { final String name; final String usageText; final String helpText; @@ -94,15 +94,15 @@ private abstract class DebugCommand { /** * The command for verifying a block metadata file and possibly block file. */ - private class VerifyMetaCommand extends DebugCommand { + private static class VerifyMetaCommand extends DebugCommand { VerifyMetaCommand() { super("verifyMeta", -"verifyMeta -meta [-block ]", -" Verify HDFS metadata and block files. If a block file is specified, we" + - System.lineSeparator() + -" will verify that the checksums in the metadata file match the block" + - System.lineSeparator() + -" file."); + "verifyMeta -meta [-block ]", + " Verify HDFS metadata and block files. If a block file is specified, we" + + System.lineSeparator() + + " will verify that the checksums in the metadata file match the block" + + System.lineSeparator() + + " file."); } int run(List args) throws IOException { @@ -210,7 +210,7 @@ int run(List args) throws IOException { /** * The command for verifying a block metadata file and possibly block file. */ - private class ComputeMetaCommand extends DebugCommand { + private static class ComputeMetaCommand extends DebugCommand { ComputeMetaCommand() { super("computeMeta", "computeMeta -block -out ", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 2faca69a5640f..aec5afc37b33a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -599,7 +599,7 @@ protected MiniDFSCluster(Builder builder) throws IOException { builder.useConfiguredTopologyMappingClass); } - public class DataNodeProperties { + public static class DataNodeProperties { final DataNode datanode; final Configuration conf; String[] dnArgs; From 5bf9179836e3cf39d82f986778c932195c8e921c Mon Sep 17 00:00:00 2001 From: litao Date: Fri, 2 Apr 2021 09:33:40 +0800 Subject: [PATCH 185/666] HDFS-15892. 
Add metric for editPendingQ in FSEditLogAsync (#2770) Signed-off-by: Takanobu Asanuma --- .../hadoop-common/src/site/markdown/Metrics.md | 1 + .../hadoop/hdfs/server/namenode/FSEditLogAsync.java | 12 ++++++++++++ .../server/namenode/metrics/NameNodeMetrics.java | 6 ++++++ 3 files changed, 19 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md index cc80e15ed2ebf..8423b5351b11f 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md @@ -228,6 +228,7 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a | `EditLogTailIntervalNumOps` | Total number of intervals between edit log tailings by standby NameNode | | `EditLogTailIntervalAvgTime` | Average time of intervals between edit log tailings by standby NameNode in milliseconds | | `EditLogTailInterval`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time between edit log tailings by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. | +| `PendingEditsCount` | Current number of pending edits | FSNamesystem ------------ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 68af0c163dd89..f60b458260b83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -28,6 +28,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -53,6 +55,8 @@ class FSEditLogAsync extends FSEditLog implements Runnable { // of the edit log buffer - ie. a sync will eventually be forced. private final Deque syncWaitQ = new ArrayDeque(); + private long lastFull = 0; + FSEditLogAsync(Configuration conf, NNStorage storage, List editsDirs) { super(conf, storage, editsDirs); // op instances cannot be shared due to queuing for background thread. @@ -194,6 +198,11 @@ private void enqueueEdit(Edit edit) { if (!editPendingQ.offer(edit)) { Preconditions.checkState( isSyncThreadAlive(), "sync thread is not alive"); + long now = Time.monotonicNow(); + if (now - lastFull > 4000) { + lastFull = now; + LOG.info("Edit pending queue is full"); + } if (Thread.holdsLock(this)) { // if queue is full, synchronized caller must immediately relinquish // the monitor before re-offering to avoid deadlock with sync thread @@ -231,15 +240,18 @@ private Edit dequeueEdit() throws InterruptedException { public void run() { try { while (true) { + NameNodeMetrics metrics = NameNode.getNameNodeMetrics(); boolean doSync; Edit edit = dequeueEdit(); if (edit != null) { // sync if requested by edit log. doSync = edit.logEdit(); syncWaitQ.add(edit); + metrics.setPendingEditsCount(editPendingQ.size() + 1); } else { // sync when editq runs dry, but have edits pending a sync. 
doSync = !syncWaitQ.isEmpty(); + metrics.setPendingEditsCount(0); } if (doSync) { // normally edit log exceptions cause the NN to terminate, but tests diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index de99ddfaa92d1..fd1fab7a7fa32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -87,6 +87,8 @@ public class NameNodeMetrics { MutableGaugeInt blockOpsQueued; @Metric("Number of blockReports and blockReceivedAndDeleted batch processed") MutableCounterLong blockOpsBatched; + @Metric("Number of pending edits") + MutableGaugeInt pendingEditsCount; @Metric("Number of file system operations") public long totalFileOps(){ @@ -343,6 +345,10 @@ public void addBlockOpsBatched(int count) { blockOpsBatched.incr(count); } + public void setPendingEditsCount(int size) { + pendingEditsCount.set(size); + } + public void addTransaction(long latency) { transactions.add(latency); } From 912f545279a37141bd597bb488cfb209f08ee5b1 Mon Sep 17 00:00:00 2001 From: litao Date: Fri, 2 Apr 2021 10:28:17 +0800 Subject: [PATCH 186/666] HDFS-15938. Fix java doc in FSEditLog (#2837). Contributed by tomscut. Signed-off-by: Akira Ajisaka Signed-off-by: Mingliang Liu --- .../org/apache/hadoop/hdfs/server/namenode/FSEditLog.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 2ef3a028acd2a..6b73bbdff436b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -25,6 +25,7 @@ import java.net.URI; import java.util.ArrayList; import java.util.Collection; +import java.util.EnumSet; import java.util.Iterator; import java.util.List; import java.util.concurrent.atomic.AtomicLong; @@ -1180,7 +1181,8 @@ void logDisallowSnapshot(String path) { /** * Log a CacheDirectiveInfo returned from - * {@link CacheManager#addDirective(CacheDirectiveInfo, FSPermissionChecker)} + * {@link CacheManager#addDirective(CacheDirectiveInfo, FSPermissionChecker, + * EnumSet)} */ void logAddCacheDirectiveInfo(CacheDirectiveInfo directive, boolean toLogRpcIds) { From 17efb60871c7f51109c121468f80dfc897af6db5 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Fri, 2 Apr 2021 09:47:20 +0530 Subject: [PATCH 187/666] HDFS-15222. Correct the "hdfs fsck -list-corruptfileblocks" command output. Contributed by Ravuri Sushma sree. 
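The change below is a wording fix only: the command lists corrupt blocks, so DFSck now prints "CORRUPT blocks" instead of "CORRUPT files", and the tests assert the new strings. A small self-contained sketch of the corrected summary output (method and parameter names here are illustrative, not DFSck's own; the return convention is also illustrative):

    // Prints the corrected per-path summary; returns 0 when nothing is corrupt.
    static int printCorruptBlockSummary(java.util.List<String> corruptLines,
        String dir, java.io.PrintStream out) {
      int numCorrupt = 0;
      for (String line : corruptLines) {
        numCorrupt++;
        if (numCorrupt == 1) {
          out.println("The list of corrupt blocks under path '" + dir + "' are:");
        }
        out.println(line);
      }
      out.println("The filesystem under path '" + dir + "' has "
          + numCorrupt + " CORRUPT blocks");
      return numCorrupt == 0 ? 0 : 1;
    }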
--- .../org/apache/hadoop/hdfs/tools/DFSck.java | 4 +-- .../hadoop/hdfs/TestClientReportBadBlock.java | 2 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 36 ++++++++++--------- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index 8a2ef8b5920f5..db30133d0c028 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -227,7 +227,7 @@ private Integer listCorruptFileBlocks(String dir, String baseUrl) continue; numCorrupt++; if (numCorrupt == 1) { - out.println("The list of corrupt files under path '" + out.println("The list of corrupt blocks under path '" + dir + "' are:"); } out.println(line); @@ -237,7 +237,7 @@ private Integer listCorruptFileBlocks(String dir, String baseUrl) } } out.println("The filesystem under path '" + dir + "' has " - + numCorrupt + " CORRUPT files"); + + numCorrupt + " CORRUPT blocks"); if (numCorrupt == 0) errCode = 0; return errCode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index 935a63992018c..2f5aa96757da9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -316,7 +316,7 @@ private static void testFsckListCorruptFilesBlocks(Path filePath, int errorCode) String outStr = runFsck(conf, errorCode, true, filePath.toString(), "-list-corruptfileblocks"); LOG.info("fsck -list-corruptfileblocks out: " + outStr); if (errorCode != 0) { - Assert.assertTrue(outStr.contains("CORRUPT files")); + Assert.assertTrue(outStr.contains("CORRUPT blocks")); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index ca5a87033e563..2c9075e19376b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -1136,7 +1136,7 @@ public void testFsckListCorruptFilesBlocks() throws Exception { String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks"); System.out.println("1. good fsck out: " + outStr); - assertTrue(outStr.contains("has 0 CORRUPT files")); + assertTrue(outStr.contains("has 0 CORRUPT blocks")); // delete the blocks final String bpid = cluster.getNamesystem().getBlockPoolId(); for (int i=0; i<4; i++) { @@ -1159,19 +1159,19 @@ public void testFsckListCorruptFilesBlocks() throws Exception { waitForCorruptionBlocks(3, "/corruptData"); outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks"); System.out.println("2. bad fsck out: " + outStr); - assertTrue(outStr.contains("has 3 CORRUPT files")); + assertTrue(outStr.contains("has 3 CORRUPT blocks")); // Do a listing on a dir which doesn't have any corrupt blocks and validate util.createFiles(fs, "/goodData"); outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks"); System.out.println("3. 
good fsck out: " + outStr); - assertTrue(outStr.contains("has 0 CORRUPT files")); + assertTrue(outStr.contains("has 0 CORRUPT blocks")); util.cleanup(fs, "/goodData"); // validate if a directory have any invalid entries util.createFiles(fs, "/corruptDa"); outStr = runFsck(conf, 0, true, "/corruptDa", "-list-corruptfileblocks"); - assertTrue(outStr.contains("has 0 CORRUPT files")); + assertTrue(outStr.contains("has 0 CORRUPT blocks")); util.cleanup(fs, "/corruptData"); util.cleanup(fs, "/corruptDa"); } @@ -2120,7 +2120,7 @@ public void testFsckListCorruptSnapshotFiles() throws Exception { String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks"); System.out.println("1. good fsck out: " + outStr); - assertTrue(outStr.contains("has 0 CORRUPT files")); + assertTrue(outStr.contains("has 0 CORRUPT blocks")); // delete the blocks final String bpid = cluster.getNamesystem().getBlockPoolId(); for (int i=0; i() { @Override public Void run() throws Exception { - String path = "/"; - String outStr = runFsck(conf, -1, true, path, "-list-corruptfileblocks"); - - assertFalse(outStr.contains("The list of corrupt files under path '" + path + "' are:")); - assertFalse(outStr.contains("The filesystem under path '" + path + "' has ")); - assertTrue(outStr.contains("Failed to open path '" + path + "': Permission denied")); - return null; + String path = "/"; + String outStr = + runFsck(conf, -1, true, path, "-list-corruptfileblocks"); + + assertFalse(outStr.contains( + "The list of corrupt blocks under path '" + path + "' are:")); + assertFalse( + outStr.contains("The filesystem under path '" + path + "' has ")); + assertTrue(outStr + .contains("Failed to open path '" + path + "': Permission denied")); + return null; } }); } From 359974b1e433d7bf93f3e7b5cfedf81d9ae914da Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Fri, 2 Apr 2021 09:56:00 +0530 Subject: [PATCH 188/666] HADOOP-17610. DelegationTokenAuthenticator prints token information. Contributed by Ravuri Sushma sree. --- .../token/delegation/web/DelegationTokenAuthenticator.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java index 8546a76c1afa1..19427dcfafeb4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java @@ -138,8 +138,8 @@ public void authenticate(URL url, AuthenticatedURL.Token token) try { // check and renew TGT to handle potential expiration UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab(); - LOG.debug("No delegation token found for url={}, token={}, " - + "authenticating with {}", url, token, authenticator.getClass()); + LOG.debug("No delegation token found for url={}, " + + "authenticating with {}", url, authenticator.getClass()); authenticator.authenticate(url, token); } catch (IOException ex) { throw NetUtils.wrapException(url.getHost(), url.getPort(), From 92a472d30e58d4cfd8dc91dd29b8a83f606f9d3b Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Fri, 2 Apr 2021 10:03:50 +0530 Subject: [PATCH 189/666] HADOOP-17587. 
Kinit with keytab should not display the keytab file's full path in any logs. Contributed by Ravuri Sushma sree. --- .../org/apache/hadoop/security/UserGroupInformation.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index cc32dae170c6c..7e90b8ebd1dd9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -1125,9 +1125,10 @@ static void loginUserFromKeytab(String user, setLoginUser(u); - LOG.info("Login successful for user {} using keytab file {}. Keytab auto" + - " renewal enabled : {}", - user, path, isKerberosKeyTabLoginRenewalEnabled()); + LOG.info( + "Login successful for user {} using keytab file {}. Keytab auto" + + " renewal enabled : {}", + user, new File(path).getName(), isKerberosKeyTabLoginRenewalEnabled()); } /** From e47617e19359c8ce62cc827d86f49a8c90fd8a10 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Fri, 2 Apr 2021 22:07:33 +0530 Subject: [PATCH 190/666] HDFS-15944. Prevent truncation by snprintf (#2853) --- .../main/native/fuse-dfs/test/fuse_workload.c | 34 +++++++++++++++---- .../libhdfs-tests/test_libhdfs_threaded.c | 5 ++- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c index 26c482ba28566..a94913e7c79ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c @@ -184,7 +184,11 @@ static int testOpenTrunc(const char *base) const char * const SAMPLE2 = "this is the second file that we wrote. 
" "It's #2!"; - snprintf(path, sizeof(path), "%s/trunc.txt", base); + int szToWrite = snprintf(NULL, 0, "%s/trunc.txt", base); + EXPECT_INT_LT(szToWrite, PATH_MAX); + int szWritten = snprintf(path, sizeof(path), "%s/trunc.txt", base); + EXPECT_NONNEGATIVE(szWritten); + fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644); if (fd < 0) { err = errno; @@ -252,7 +256,10 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp, EXPECT_NONZERO(S_ISDIR(stBuf.st_mode)); // mkdir /a - snprintf(tmp, sizeof(tmp), "%s/a", base); + int szToWrite = snprintf(NULL, 0, "%s/a", base); + EXPECT_INT_LT(szToWrite, PATH_MAX); + int szWritten = snprintf(tmp, sizeof(tmp), "%s/a", base); + EXPECT_NONNEGATIVE(szWritten); RETRY_ON_EINTR_GET_ERRNO(ret, mkdir(tmp, 0755)); EXPECT_ZERO(ret); @@ -260,7 +267,10 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp, EXPECT_INT_EQ(1, testReadDir(base, expectDirs, DIRS_A_AND_B)); // mkdir /b - snprintf(tmp, sizeof(tmp), "%s/b", base); + szToWrite = snprintf(NULL, 0, "%s/b", base); + EXPECT_INT_LT(szToWrite, PATH_MAX); + szWritten = snprintf(tmp, sizeof(tmp), "%s/b", base); + EXPECT_NONNEGATIVE(szWritten); RETRY_ON_EINTR_GET_ERRNO(ret, mkdir(tmp, 0755)); EXPECT_ZERO(ret); @@ -268,8 +278,16 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp, EXPECT_INT_EQ(2, testReadDir(base, expectDirs, DIRS_A_AND_B)); // rename a -> c - snprintf(src, sizeof(src), "%s/a", base); - snprintf(dst, sizeof(dst), "%s/c", base); + szToWrite = snprintf(NULL, 0, "%s/a", base); + EXPECT_INT_LT(szToWrite, PATH_MAX); + szWritten = snprintf(src, sizeof(src), "%s/a", base); + EXPECT_NONNEGATIVE(szWritten); + + szToWrite = snprintf(NULL, 0, "%s/c", base); + EXPECT_INT_LT(szToWrite, PATH_MAX); + szWritten = snprintf(dst, sizeof(dst), "%s/c", base); + EXPECT_NONNEGATIVE(szWritten); + EXPECT_ZERO(rename(src, dst)); // readdir c and b @@ -294,7 +312,11 @@ int runFuseWorkloadImpl(const char *root, const char *pcomp, // open some files and write to them for (i = 0; i < NUM_FILE_CTX; i++) { - snprintf(tmp, sizeof(tmp), "%s/b/%d", base, i); + szToWrite = snprintf(NULL, 0, "%s/b/%d", base, i); + EXPECT_INT_LT(szToWrite, PATH_MAX); + szWritten = snprintf(tmp, sizeof(tmp), "%s/b/%d", base, i); + EXPECT_NONNEGATIVE(szWritten); + ctx[i].path = strdup(tmp); if (!ctx[i].path) { fprintf(stderr, "FUSE_WORKLOAD: OOM on line %d\n", __LINE__); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c index 343e05a64b747..29b31ff9d934a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_threaded.c @@ -223,7 +223,10 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs, int nFile; for (nFile = 0; nFile < 10000; nFile++) { char filename[PATH_MAX]; - snprintf(filename, PATH_MAX, "%s/many_files_%d", listDirTest, nFile); + int szToWrite = snprintf(NULL, 0, "%s/many_files_%d", listDirTest, nFile); + EXPECT_INT_LT(szToWrite, PATH_MAX); + int szWritten = snprintf(filename, PATH_MAX, "%s/many_files_%d", listDirTest, nFile); + EXPECT_NONNEGATIVE(szWritten); file = hdfsOpenFile(fs, filename, O_WRONLY, 0, 0, 0); EXPECT_NONNULL(file); EXPECT_ZERO(hdfsCloseFile(fs, file)); From 1297fb4e2a9b3531efff1e7e07490100cdca3511 Mon Sep 17 00:00:00 2001 From: litao Date: Sat, 3 Apr 2021 01:37:04 +0800 
Subject: [PATCH 191/666] HDFS-15946. Fix java doc in FSPermissionChecker (#2855). Contributed by tomscut. Signed-off-by: Mingliang Liu --- .../apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index a83ec51529b50..3f80952a8e9af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -549,7 +549,6 @@ private boolean hasPermission(INodeAttributes inode, FsAction access) { * - Default entries may be present, but they are ignored during enforcement. * * @param inode INodeAttributes accessed inode - * @param snapshotId int snapshot ID * @param access FsAction requested permission * @param mode FsPermission mode from inode * @param aclFeature AclFeature of inode From a9d34fbe1a63e63eccf9c00e9eb768eb3ec0bbb3 Mon Sep 17 00:00:00 2001 From: zhuqi Date: Sun, 4 Apr 2021 05:41:02 +0800 Subject: [PATCH 192/666] HDFS-15930: Fix some @param errors in DirectoryScanner. (#2829). Contributed by Qi Zhu. Signed-off-by: Ayush Saxena Signed-off-by: He Xiaoqiao --- .../apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 8e331712d44be..63865f69f29f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -173,7 +173,8 @@ public static class ScanInfoVolumeReport { /** * Create a new info list initialized to the given expected size. * - * @param sz initial expected size + * @param volume + * @param blockPools list of known block pools */ ScanInfoVolumeReport(final FsVolumeSpi volume, final Collection blockPools) { @@ -220,8 +221,6 @@ public static class BlockPoolReport { /** * Create a block pool report. - * - * @param volume */ BlockPoolReport() { this.blockPools = new HashSet<>(2); From 99a34b4e7ca3db16738d0bdc8de1ad6a8c1bd8cf Mon Sep 17 00:00:00 2001 From: zhuqi Date: Sun, 4 Apr 2021 17:22:03 +0800 Subject: [PATCH 193/666] HADOOP-17619: Fix DelegationTokenRenewer#updateRenewalTime java doc error. (#2846). Contributed by Qi Zhu. Signed-off-by: Ayush Saxena --- .../main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 38a7344d4f9ee..8cc9e78b7936b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -107,7 +107,7 @@ public boolean equals(final Object that) { * Set a new time for the renewal. 
* It can only be called when the action is not in the queue or any * collection because the hashCode may change - * @param newTime the new time + * @param delay the renewal time */ private void updateRenewalTime(long delay) { renewalTime = Time.now() + delay - delay/10; From a607830377f993c5ca677737850e6be7257aa2b0 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Sun, 4 Apr 2021 17:35:30 -0700 Subject: [PATCH 194/666] HADOOP-17614. Bump netty to the latest 4.1.61. (#2850) Reviewed-by: Ayush Saxena --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 1a549c8550729..0c73a74b14a9f 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -139,7 +139,7 @@ 2.2.4 3.2.4 3.10.6.Final - 4.1.50.Final + 4.1.61.Final 1.1.8.2 1.7.1 From 11fb7f8d5c02dcc72666af9e1456c2c1d5bfd3a0 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Mon, 5 Apr 2021 08:12:05 +0530 Subject: [PATCH 195/666] HDFS-15950. Remove unused hdfs.proto import (#2858) Signed-off-by: Akira Ajisaka --- .../hadoop-hdfs-client/src/main/proto/inotify.proto | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto index afcccaa13bd5d..e1ade19b27ee8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto @@ -32,7 +32,6 @@ package hadoop.hdfs; import "acl.proto"; import "xattr.proto"; -import "hdfs.proto"; enum EventType { EVENT_CREATE = 0x0; From 407b29418f69c8320df05218902a3b512325d1c1 Mon Sep 17 00:00:00 2001 From: litao Date: Mon, 5 Apr 2021 16:47:10 +0800 Subject: [PATCH 196/666] HDFS-15951. Remove unused parameters in NameNodeProxiesClient (#2859) Signed-off-by: Takanobu Asanuma --- .../java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java index 31bc2d97a8662..3725fc21590c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java @@ -78,11 +78,6 @@ public class NameNodeProxiesClient { private static final Logger LOG = LoggerFactory.getLogger( NameNodeProxiesClient.class); - /** Maximum # of retries for HAProxy with HAServiceProtocol. */ - private static final int MAX_RETRIES = 3; - /** Initial retry delay for HAProxy with HAServiceProtocol. */ - private static final int DELAY_MILLISECONDS = 200; - /** * Wrapper for a client proxy as well as its associated service ID. * This is simply used as a tuple-like return type for created NN proxy. From 9019afb627b35c9a6301cdb43f0b20d10e2390a9 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Mon, 5 Apr 2021 22:22:48 +0530 Subject: [PATCH 197/666] HDFS-15947. 
Replace deprecated protobuf APIs (#2856) --- .../src/main/native/libhdfspp/lib/common/util.cc | 8 ++++---- .../src/main/native/libhdfspp/lib/common/util.h | 2 +- .../src/main/native/libhdfspp/lib/rpc/request.cc | 8 ++++---- .../native/libhdfspp/lib/rpc/rpc_connection_impl.cc | 4 ++-- .../native/libhdfspp/tests/remote_block_reader_test.cc | 6 +++--- .../src/main/native/libhdfspp/tests/rpc_engine_test.cc | 10 +++++----- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc index 8f6e77a53959f..c0e10183297ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.cc @@ -56,7 +56,7 @@ std::string SerializeDelimitedProtobufMessage(const ::google::protobuf::MessageL std::string buf; - int size = msg->ByteSize(); + const auto size = msg->ByteSizeLong(); buf.reserve(pbio::CodedOutputStream::VarintSize32(size) + size); pbio::StringOutputStream ss(&buf); pbio::CodedOutputStream os(&ss); @@ -68,9 +68,9 @@ std::string SerializeDelimitedProtobufMessage(const ::google::protobuf::MessageL return buf; } -int DelimitedPBMessageSize(const ::google::protobuf::MessageLite *msg) { - size_t size = msg->ByteSize(); - return ::google::protobuf::io::CodedOutputStream::VarintSize32(size) + size; +size_t DelimitedPBMessageSize(const ::google::protobuf::MessageLite *msg) { + const auto size = msg->ByteSizeLong(); + return ::google::protobuf::io::CodedOutputStream::VarintSize64(size) + size; } std::shared_ptr GetRandomClientName() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h index 140f66e8482d0..45cd68a1f008f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/util.h @@ -47,7 +47,7 @@ Status ToStatus(const boost::system::error_code &ec); // Determine size of buffer that needs to be allocated in order to serialize msg // in delimited format -int DelimitedPBMessageSize(const ::google::protobuf::MessageLite *msg); +size_t DelimitedPBMessageSize(const ::google::protobuf::MessageLite *msg); // Construct msg from the input held in the CodedInputStream // return false on failure, otherwise return true diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc index ecac2bcb00c59..99762c89ee9e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/request.cc @@ -47,7 +47,7 @@ static const int kNoRetry = -1; static void AddHeadersToPacket(std::string *res, std::initializer_list headers, const std::string *payload) { - int len = 0; + size_t len = 0; std::for_each( headers.begin(), headers.end(), [&len](const pb::MessageLite *v) { len += DelimitedPBMessageSize(v); }); @@ -68,7 +68,7 @@ static void AddHeadersToPacket(std::string *res, std::for_each( headers.begin(), headers.end(), [&buf](const pb::MessageLite *v) { - buf = 
pbio::CodedOutputStream::WriteVarint32ToArray(v->ByteSize(), buf); + buf = pbio::CodedOutputStream::WriteVarint64ToArray(v->ByteSizeLong(), buf); buf = v->SerializeWithCachedSizesToArray(buf); }); @@ -78,13 +78,13 @@ static void AddHeadersToPacket(std::string *res, } static void ConstructPayload(std::string *res, const pb::MessageLite *header) { - int len = DelimitedPBMessageSize(header); + const auto len = DelimitedPBMessageSize(header); res->reserve(len); pbio::StringOutputStream ss(res); pbio::CodedOutputStream os(&ss); uint8_t *buf = os.GetDirectBufferForNBytesAndAdvance(len); assert(buf); - buf = pbio::CodedOutputStream::WriteVarint32ToArray(header->ByteSize(), buf); + buf = pbio::CodedOutputStream::WriteVarint64ToArray(header->ByteSizeLong(), buf); buf = header->SerializeWithCachedSizesToArray(buf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc index a5de92e61bf3d..5d434ef370a9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.cc @@ -36,7 +36,7 @@ using namespace ::std::placeholders; static void AddHeadersToPacket( std::string *res, std::initializer_list headers, const std::string *payload) { - int len = 0; + size_t len = 0; std::for_each( headers.begin(), headers.end(), [&len](const pb::MessageLite *v) { len += DelimitedPBMessageSize(v); }); @@ -57,7 +57,7 @@ static void AddHeadersToPacket( std::for_each( headers.begin(), headers.end(), [&buf](const pb::MessageLite *v) { - buf = pbio::CodedOutputStream::WriteVarint32ToArray(v->ByteSize(), buf); + buf = pbio::CodedOutputStream::WriteVarint64ToArray(v->ByteSizeLong(), buf); buf = v->SerializeWithCachedSizesToArray(buf); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc index 4843da917865f..ccec5812f61d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/remote_block_reader_test.cc @@ -119,7 +119,7 @@ static inline string ToDelimitedString(const pb::MessageLite *msg) { res.reserve(hdfs::DelimitedPBMessageSize(msg)); pbio::StringOutputStream os(&res); pbio::CodedOutputStream out(&os); - out.WriteVarint32(msg->ByteSize()); + out.WriteVarint64(msg->ByteSizeLong()); msg->SerializeToCodedStream(&out); return res; } @@ -141,9 +141,9 @@ static inline std::pair ProducePacket( *reinterpret_cast(prefix) = htonl(data.size() + checksum.size() + sizeof(int32_t)); *reinterpret_cast(prefix + sizeof(int32_t)) = - htons(proto.ByteSize()); + htons(static_cast(proto.ByteSizeLong())); std::string payload(prefix, sizeof(prefix)); - payload.reserve(payload.size() + proto.ByteSize() + checksum.size() + + payload.reserve(payload.size() + proto.ByteSizeLong() + checksum.size() + data.size()); proto.AppendToString(&payload); payload += checksum; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc index 931f873d6deb6..caf4842b29899 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/rpc_engine_test.cc @@ -88,9 +88,9 @@ class SharedConnectionEngine : public RpcEngine { static inline std::pair RpcResponse( const RpcResponseHeaderProto &h, const std::string &data, const boost::system::error_code &ec = boost::system::error_code()) { - uint32_t payload_length = - pbio::CodedOutputStream::VarintSize32(h.ByteSize()) + - pbio::CodedOutputStream::VarintSize32(data.size()) + h.ByteSize() + + const auto payload_length = + pbio::CodedOutputStream::VarintSize64(h.ByteSizeLong()) + + pbio::CodedOutputStream::VarintSize64(data.size()) + h.ByteSizeLong() + data.size(); std::string res; @@ -99,9 +99,9 @@ static inline std::pair RpcResponse( buf = pbio::CodedOutputStream::WriteLittleEndian32ToArray( htonl(payload_length), buf); - buf = pbio::CodedOutputStream::WriteVarint32ToArray(h.ByteSize(), buf); + buf = pbio::CodedOutputStream::WriteVarint64ToArray(h.ByteSizeLong(), buf); buf = h.SerializeWithCachedSizesToArray(buf); - buf = pbio::CodedOutputStream::WriteVarint32ToArray(data.size(), buf); + buf = pbio::CodedOutputStream::WriteVarint64ToArray(data.size(), buf); buf = pbio::CodedOutputStream::WriteStringToArray(data, buf); return std::make_pair(ec, std::move(res)); From 0751b242f33eb49ce650a4cac9a41a200f15701a Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 6 Apr 2021 00:19:21 +0530 Subject: [PATCH 198/666] HDFS-15940. Fixing and refactoring tests specific to Block recovery.(#2844). Contributed by Viraj Jasani Signed-off-by: Ayush Saxena Signed-off-by: Takanobu Asanuma --- .../server/datanode/TestBlockRecovery.java | 413 +++------------- .../server/datanode/TestBlockRecovery2.java | 463 ++++++++++++++++++ .../datanode/TestDataNodeReconfiguration.java | 4 - 3 files changed, 538 insertions(+), 342 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index ee522725e281e..995a135c4e30f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -19,21 +19,14 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.hdfs.AppendTestUtil; -import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; 
import static org.mockito.Mockito.mock; @@ -44,16 +37,13 @@ import java.io.File; import java.io.IOException; -import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -78,13 +68,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.StripedFileTestUtil; -import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; @@ -99,11 +87,9 @@ import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; -import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.SleepAnswer; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; import org.slf4j.event.Level; @@ -298,13 +284,13 @@ public void tearDown() throws IOException { } } - /** Sync two replicas */ - private void testSyncReplicas(ReplicaRecoveryInfo replica1, - ReplicaRecoveryInfo replica2, - InterDatanodeProtocol dn1, - InterDatanodeProtocol dn2, - long expectLen) throws IOException { - + /** + * Sync two replicas. + */ + private void testSyncReplicas(ReplicaRecoveryInfo replica1, + ReplicaRecoveryInfo replica2, InterDatanodeProtocol dn1, + InterDatanodeProtocol dn2) throws IOException { + DatanodeInfo[] locs = new DatanodeInfo[]{ mock(DatanodeInfo.class), mock(DatanodeInfo.class)}; RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID); @@ -315,7 +301,7 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1, DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2); syncList.add(record1); syncList.add(record2); - + when(dn1.updateReplicaUnderRecovery(any(ExtendedBlock.class), anyLong(), anyLong(), anyLong())).thenReturn("storage1"); when(dn2.updateReplicaUnderRecovery(any(ExtendedBlock.class), anyLong(), @@ -325,7 +311,7 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1, recoveryWorker.new RecoveryTaskContiguous(rBlock); RecoveryTaskContiguous.syncBlock(syncList); } - + /** * BlockRecovery_02.8. 
* Two replicas are in Finalized state @@ -336,38 +322,38 @@ public void testFinalizedReplicas () throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED); - ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-2, ReplicaState.FINALIZED); + ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED); + ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.FINALIZED); InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class); InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class); - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); // two finalized replicas have different length - replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED); - replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN2, GEN_STAMP-2, ReplicaState.FINALIZED); + replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED); + replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.FINALIZED); try { - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); Assert.fail("Two finalized replicas should not have different lengthes!"); } catch (IOException e) { Assert.assertTrue(e.getMessage().startsWith( "Inconsistent size of finalized replicas. ")); } } - + /** * BlockRecovery_02.9. - * One replica is Finalized and another is RBW. + * One replica is Finalized and another is RBW. 
* @throws IOException in case of an error */ @Test(timeout=60000) @@ -375,80 +361,81 @@ public void testFinalizedRbwReplicas() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - + // rbw and finalized replicas have the same length - ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED); - ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RBW); InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class); InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class); - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); - + // rbw replica has a different length from the finalized one - replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED); - replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW); + replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED); + replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW); dn1 = mock(InterDatanodeProtocol.class); dn2 = mock(InterDatanodeProtocol.class); - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); verify(dn2, never()).updateReplicaUnderRecovery( block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); } - + /** * BlockRecovery_02.10. - * One replica is Finalized and another is RWR. + * One replica is Finalized and another is RWR. 
+ * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test(timeout = 60000) public void testFinalizedRwrReplicas() throws IOException { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - + // rbw and finalized replicas have the same length - ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED); - ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR); + ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED); + ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR); InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class); InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class); - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); verify(dn2, never()).updateReplicaUnderRecovery( block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); - + // rbw replica has a different length from the finalized one - replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED); - replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW); + replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.FINALIZED); + replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW); dn1 = mock(InterDatanodeProtocol.class); dn2 = mock(InterDatanodeProtocol.class); - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); verify(dn2, never()).updateReplicaUnderRecovery( block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); } - + /** * BlockRecovery_02.11. * Two replicas are RBW. @@ -456,26 +443,27 @@ public void testFinalizedRwrReplicas() throws IOException { */ @Test(timeout=60000) public void testRBWReplicas() throws IOException { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW); - ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW); + ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW); + ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW); InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class); InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class); long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2); - testSyncReplicas(replica1, replica2, dn1, dn2, minLen); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen); verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen); } - + /** * BlockRecovery_02.12. - * One replica is RBW and another is RWR. + * One replica is RBW and another is RWR. 
+ * * @throws IOException in case of an error */ @Test(timeout=60000) @@ -483,44 +471,45 @@ public void testRBW_RWRReplicas() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW); - ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR); + ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW); + ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR); InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class); InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class); - testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1); + testSyncReplicas(replica1, replica2, dn1, dn2); verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); verify(dn2, never()).updateReplicaUnderRecovery( block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1); } - + /** - * BlockRecovery_02.13. + * BlockRecovery_02.13. * Two replicas are RWR. + * * @throws IOException in case of an error */ @Test(timeout=60000) public void testRWRReplicas() throws IOException { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RWR); - ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, - REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RWR); + ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RWR); + ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, + REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RWR); InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class); InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class); long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2); - testSyncReplicas(replica1, replica2, dn1, dn2, minLen); - + testSyncReplicas(replica1, replica2, dn1, dn2); + verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen); verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen); - } + } private Collection initRecoveringBlocks() throws IOException { Collection blocks = new ArrayList(1); @@ -708,132 +697,6 @@ public void testNotMatchedReplicaID() throws IOException { } } - @Test(timeout = 60000) - public void testEcRecoverBlocks() throws Throwable { - // Stop the Mocked DN started in startup() - tearDown(); - ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); - MiniDFSCluster cluster = - new MiniDFSCluster.Builder(conf).numDataNodes(8).build(); - - try { - cluster.waitActive(); - NamenodeProtocols preSpyNN = cluster.getNameNodeRpc(); - NamenodeProtocols spyNN = spy(preSpyNN); - - // Delay completeFile - GenericTestUtils.DelayAnswer delayer = - new GenericTestUtils.DelayAnswer(LOG); - doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), any(), - anyLong()); - String topDir = "/myDir"; - DFSClient client = new DFSClient(null, spyNN, conf, null); - Path file = new Path(topDir + "/testECLeaseRecover"); - client.mkdirs(topDir, null, false); - client.enableErasureCodingPolicy(ecPolicy.getName()); - client.setErasureCodingPolicy(topDir, ecPolicy.getName()); - OutputStream stm = client.create(file.toString(), true); - - // write 5MB 
File - AppendTestUtil.write(stm, 0, 1024 * 1024 * 5); - final AtomicReference err = new AtomicReference(); - Thread t = new Thread() { - @Override - public void run() { - try { - stm.close(); - } catch (Throwable t) { - err.set(t); - } - } - }; - t.start(); - - // Waiting for close to get to latch - delayer.waitForCall(); - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - try { - return client.getNamenode().recoverLease(file.toString(), - client.getClientName()); - } catch (IOException e) { - return false; - } - } - }, 5000, 24000); - delayer.proceed(); - } finally { - cluster.shutdown(); - } - } - - /** - * Test to verify the race between finalizeBlock and Lease recovery - * - * @throws Exception - */ - @Test(timeout = 20000) - public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception { - tearDown();// Stop the Mocked DN started in startup() - - Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000"); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(1).build(); - try { - cluster.waitClusterUp(); - DistributedFileSystem fs = cluster.getFileSystem(); - Path path = new Path("/test"); - FSDataOutputStream out = fs.create(path); - out.writeBytes("data"); - out.hsync(); - - List blocks = DFSTestUtil.getAllBlocks(fs.open(path)); - final LocatedBlock block = blocks.get(0); - final DataNode dataNode = cluster.getDataNodes().get(0); - - final AtomicBoolean recoveryInitResult = new AtomicBoolean(true); - Thread recoveryThread = new Thread() { - @Override - public void run() { - try { - DatanodeInfo[] locations = block.getLocations(); - final RecoveringBlock recoveringBlock = new RecoveringBlock( - block.getBlock(), locations, block.getBlock() - .getGenerationStamp() + 1); - try(AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) { - Thread.sleep(2000); - dataNode.initReplicaRecovery(recoveringBlock); - } - } catch (Exception e) { - recoveryInitResult.set(false); - } - } - }; - recoveryThread.start(); - try { - out.close(); - } catch (IOException e) { - Assert.assertTrue("Writing should fail", - e.getMessage().contains("are bad. Aborting...")); - } finally { - recoveryThread.join(); - } - Assert.assertTrue("Recovery should be initiated successfully", - recoveryInitResult.get()); - - dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock() - .getGenerationStamp() + 1, block.getBlock().getBlockId(), - block.getBlockSize()); - } finally { - if (null != cluster) { - cluster.shutdown(); - cluster = null; - } - } - } - /** * DNs report RUR instead of RBW, RWR or FINALIZED. Primary DN expected to * throw an exception. @@ -1107,57 +970,7 @@ public void run() { } } - /** - * Test for block recovery taking longer than the heartbeat interval. - */ - @Test(timeout = 300000L) - public void testRecoverySlowerThanHeartbeat() throws Exception { - tearDown(); // Stop the Mocked DN started in startup() - - SleepAnswer delayer = new SleepAnswer(3000, 6000); - testRecoveryWithDatanodeDelayed(delayer); - } - - /** - * Test for block recovery timeout. All recovery attempts will be delayed - * and the first attempt will be lost to trigger recovery timeout and retry. 
- */ - @Test(timeout = 300000L) - public void testRecoveryTimeout() throws Exception { - tearDown(); // Stop the Mocked DN started in startup() - final Random r = new Random(); - - // Make sure first commitBlockSynchronization call from the DN gets lost - // for the recovery timeout to expire and new recovery attempt - // to be started. - SleepAnswer delayer = new SleepAnswer(3000) { - private final AtomicBoolean callRealMethod = new AtomicBoolean(); - - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - boolean interrupted = false; - try { - Thread.sleep(r.nextInt(3000) + 6000); - } catch (InterruptedException ie) { - interrupted = true; - } - try { - if (callRealMethod.get()) { - return invocation.callRealMethod(); - } - callRealMethod.set(true); - return null; - } finally { - if (interrupted) { - Thread.currentThread().interrupt(); - } - } - } - }; - testRecoveryWithDatanodeDelayed(delayer); - } - - private void testRecoveryWithDatanodeDelayed( + static void testRecoveryWithDatanodeDelayed( GenericTestUtils.SleepAnswer recoveryDelayer) throws Exception { Configuration configuration = new HdfsConfiguration(); configuration.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); @@ -1209,80 +1022,4 @@ public Boolean get() { } } - /** - * Test that block will be recovered even if there are less than the - * specified minReplication datanodes involved in its recovery. - * - * Check that, after recovering, the block will be successfully replicated. - */ - @Test(timeout = 300000L) - public void testRecoveryWillIgnoreMinReplication() throws Exception { - tearDown(); // Stop the Mocked DN started in startup() - - final int blockSize = 4096; - final int numReplicas = 3; - final String filename = "/testIgnoreMinReplication"; - final Path filePath = new Path(filename); - Configuration configuration = new HdfsConfiguration(); - configuration.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000); - configuration.setInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 2); - configuration.setLong(DFS_BLOCK_SIZE_KEY, blockSize); - MiniDFSCluster cluster = null; - - try { - cluster = new MiniDFSCluster.Builder(configuration).numDataNodes(5) - .build(); - cluster.waitActive(); - final DistributedFileSystem dfs = cluster.getFileSystem(); - final FSNamesystem fsn = cluster.getNamesystem(); - - // Create a file and never close the output stream to trigger recovery - FSDataOutputStream out = dfs.create(filePath, (short) numReplicas); - out.write(AppendTestUtil.randomBytes(0, blockSize)); - out.hsync(); - - DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", - cluster.getNameNodePort()), configuration); - LocatedBlock blk = dfsClient.getNamenode(). - getBlockLocations(filename, 0, blockSize). 
- getLastLocatedBlock(); - - // Kill 2 out of 3 datanodes so that only 1 alive, thus < minReplication - List dataNodes = Arrays.asList(blk.getLocations()); - assertEquals(dataNodes.size(), numReplicas); - for (DatanodeInfo dataNode : dataNodes.subList(0, numReplicas - 1)) { - cluster.stopDataNode(dataNode.getName()); - } - - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return fsn.getNumDeadDataNodes() == 2; - } - }, 300, 300000); - - // Make sure hard lease expires to trigger replica recovery - cluster.setLeasePeriod(100L, 100L); - - // Wait for recovery to succeed - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - try { - return dfs.isFileClosed(filePath); - } catch (IOException e) {} - return false; - } - }, 300, 300000); - - // Wait for the block to be replicated - DFSTestUtil.waitForReplication(cluster, DFSTestUtil.getFirstBlock( - dfs, filePath), 1, numReplicas, 0); - - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java new file mode 100644 index 0000000000000..03d5851f23257 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -0,0 +1,463 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.datanode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.hdfs.AppendTestUtil; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; +import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.AutoCloseableLock; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; + +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +/** + * Test part 2 for sync all replicas in block recovery. 
+ */ +public class TestBlockRecovery2 { + + private static final Logger LOG = + LoggerFactory.getLogger(TestBlockRecovery2.class); + + private static final String DATA_DIR = + MiniDFSCluster.getBaseDirectory() + "data"; + + private DataNode dn; + private Configuration conf; + private boolean tearDownDone; + + private final static String CLUSTER_ID = "testClusterID"; + private final static String POOL_ID = "BP-TEST"; + private final static InetSocketAddress NN_ADDR = new InetSocketAddress( + "localhost", 5020); + + @Rule + public TestName currentTestName = new TestName(); + + static { + GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE); + GenericTestUtils.setLogLevel(LOG, Level.TRACE); + } + + /** + * Starts an instance of DataNode. + * @throws IOException + */ + @Before + public void startUp() throws IOException { + tearDownDone = false; + conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR); + conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); + conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + FileSystem.setDefaultUri(conf, + "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort()); + List locations = new ArrayList<>(); + File dataDir = new File(DATA_DIR); + FileUtil.fullyDelete(dataDir); + dataDir.mkdirs(); + StorageLocation location = StorageLocation.parse(dataDir.getPath()); + locations.add(location); + final DatanodeProtocolClientSideTranslatorPB namenode = + mock(DatanodeProtocolClientSideTranslatorPB.class); + + Mockito.doAnswer( + (Answer) invocation -> + (DatanodeRegistration) invocation.getArguments()[0]) + .when(namenode) + .registerDatanode(Mockito.any(DatanodeRegistration.class)); + + when(namenode.versionRequest()) + .thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L)); + + when(namenode.sendHeartbeat( + Mockito.any(), + Mockito.any(), + Mockito.anyLong(), + Mockito.anyLong(), + Mockito.anyInt(), + Mockito.anyInt(), + Mockito.anyInt(), + Mockito.any(), + Mockito.anyBoolean(), + Mockito.any(), + Mockito.any())) + .thenReturn(new HeartbeatResponse( + new DatanodeCommand[0], + new NNHAStatusHeartbeat(HAServiceProtocol.HAServiceState.ACTIVE, 1), + null, ThreadLocalRandom.current().nextLong() | 1L)); + + dn = new DataNode(conf, locations, null, null) { + @Override + DatanodeProtocolClientSideTranslatorPB connectToNN( + InetSocketAddress nnAddr) throws IOException { + Assert.assertEquals(NN_ADDR, nnAddr); + return namenode; + } + }; + // Trigger a heartbeat so that it acknowledges the NN as active. + dn.getAllBpOs().get(0).triggerHeartbeatForTests(); + waitForActiveNN(); + } + + /** + * Wait for active NN up to 15 seconds. + */ + private void waitForActiveNN() { + try { + GenericTestUtils.waitFor(() -> + dn.getAllBpOs().get(0).getActiveNN() != null, 1000, 15 * 1000); + } catch (TimeoutException e) { + // Here its not failing, will again do the assertions for activeNN after + // this waiting period and fails there if BPOS has not acknowledged + // any NN as active. + LOG.warn("Failed to get active NN", e); + } catch (InterruptedException e) { + LOG.warn("InterruptedException while waiting to see active NN", e); + } + Assert.assertNotNull("Failed to get ActiveNN", + dn.getAllBpOs().get(0).getActiveNN()); + } + + /** + * Cleans the resources and closes the instance of datanode. 
+ * @throws IOException if an error occurred + */ + @After + public void tearDown() throws IOException { + if (!tearDownDone && dn != null) { + try { + dn.shutdown(); + } catch(Exception e) { + LOG.error("Cannot close: ", e); + } finally { + File dir = new File(DATA_DIR); + if (dir.exists()) { + Assert.assertTrue( + "Cannot delete data-node dirs", FileUtil.fullyDelete(dir)); + } + } + tearDownDone = true; + } + } + + /** + * Test to verify the race between finalizeBlock and Lease recovery. + * + * @throws Exception + */ + @Test(timeout = 20000) + public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() + throws Exception { + // Stop the Mocked DN started in startup() + tearDown(); + + Configuration configuration = new HdfsConfiguration(); + configuration.set( + DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000"); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(configuration) + .numDataNodes(1).build(); + try { + cluster.waitClusterUp(); + DistributedFileSystem fs = cluster.getFileSystem(); + Path path = new Path("/test"); + FSDataOutputStream out = fs.create(path); + out.writeBytes("data"); + out.hsync(); + + List blocks = DFSTestUtil.getAllBlocks(fs.open(path)); + final LocatedBlock block = blocks.get(0); + final DataNode dataNode = cluster.getDataNodes().get(0); + + final AtomicBoolean recoveryInitResult = new AtomicBoolean(true); + Thread recoveryThread = new Thread(() -> { + try { + DatanodeInfo[] locations = block.getLocations(); + final BlockRecoveryCommand.RecoveringBlock recoveringBlock = + new BlockRecoveryCommand.RecoveringBlock(block.getBlock(), + locations, block.getBlock().getGenerationStamp() + 1); + try(AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) { + Thread.sleep(2000); + dataNode.initReplicaRecovery(recoveringBlock); + } + } catch (Exception e) { + recoveryInitResult.set(false); + } + }); + recoveryThread.start(); + try { + out.close(); + } catch (IOException e) { + Assert.assertTrue("Writing should fail", + e.getMessage().contains("are bad. Aborting...")); + } finally { + recoveryThread.join(); + } + Assert.assertTrue("Recovery should be initiated successfully", + recoveryInitResult.get()); + + dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock() + .getGenerationStamp() + 1, block.getBlock().getBlockId(), + block.getBlockSize()); + } finally { + if (null != cluster) { + cluster.shutdown(); + } + } + } + + /** + * Test for block recovery timeout. All recovery attempts will be delayed + * and the first attempt will be lost to trigger recovery timeout and retry. + */ + @Test(timeout = 300000L) + public void testRecoveryTimeout() throws Exception { + tearDown(); // Stop the Mocked DN started in startup() + final Random r = new Random(); + + // Make sure first commitBlockSynchronization call from the DN gets lost + // for the recovery timeout to expire and new recovery attempt + // to be started. 
+ GenericTestUtils.SleepAnswer delayer = + new GenericTestUtils.SleepAnswer(3000) { + private final AtomicBoolean callRealMethod = new AtomicBoolean(); + + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + boolean interrupted = false; + try { + Thread.sleep(r.nextInt(3000) + 6000); + } catch (InterruptedException ie) { + interrupted = true; + } + try { + if (callRealMethod.get()) { + return invocation.callRealMethod(); + } + callRealMethod.set(true); + return null; + } finally { + if (interrupted) { + Thread.currentThread().interrupt(); + } + } + } + }; + TestBlockRecovery.testRecoveryWithDatanodeDelayed(delayer); + } + + /** + * Test for block recovery taking longer than the heartbeat interval. + */ + @Test(timeout = 300000L) + public void testRecoverySlowerThanHeartbeat() throws Exception { + tearDown(); // Stop the Mocked DN started in startup() + + GenericTestUtils.SleepAnswer delayer = + new GenericTestUtils.SleepAnswer(3000, 6000); + TestBlockRecovery.testRecoveryWithDatanodeDelayed(delayer); + } + + @Test(timeout = 60000) + public void testEcRecoverBlocks() throws Throwable { + // Stop the Mocked DN started in startup() + tearDown(); + ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(8).build(); + + try { + cluster.waitActive(); + NamenodeProtocols preSpyNN = cluster.getNameNodeRpc(); + NamenodeProtocols spyNN = spy(preSpyNN); + + // Delay completeFile + GenericTestUtils.DelayAnswer delayer = + new GenericTestUtils.DelayAnswer(LOG); + doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), any(), + anyLong()); + String topDir = "/myDir"; + DFSClient client = new DFSClient(null, spyNN, conf, null); + Path file = new Path(topDir + "/testECLeaseRecover"); + client.mkdirs(topDir, null, false); + client.enableErasureCodingPolicy(ecPolicy.getName()); + client.setErasureCodingPolicy(topDir, ecPolicy.getName()); + OutputStream stm = client.create(file.toString(), true); + + // write 5MB File + AppendTestUtil.write(stm, 0, 1024 * 1024 * 5); + final AtomicReference err = new AtomicReference<>(); + Thread t = new Thread(() -> { + try { + stm.close(); + } catch (Throwable t1) { + err.set(t1); + } + }); + t.start(); + + // Waiting for close to get to latch + delayer.waitForCall(); + GenericTestUtils.waitFor(() -> { + try { + return client.getNamenode().recoverLease(file.toString(), + client.getClientName()); + } catch (IOException e) { + return false; + } + }, 5000, 24000); + delayer.proceed(); + } finally { + cluster.shutdown(); + } + } + + /** + * Test that block will be recovered even if there are less than the + * specified minReplication datanodes involved in its recovery. + * + * Check that, after recovering, the block will be successfully replicated. 
+ */ + @Test(timeout = 300000L) + public void testRecoveryWillIgnoreMinReplication() throws Exception { + tearDown(); // Stop the Mocked DN started in startup() + + final int blockSize = 4096; + final int numReplicas = 3; + final String filename = "/testIgnoreMinReplication"; + final Path filePath = new Path(filename); + Configuration configuration = new HdfsConfiguration(); + configuration.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000); + configuration.setInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 2); + configuration.setLong(DFS_BLOCK_SIZE_KEY, blockSize); + MiniDFSCluster cluster = null; + + try { + cluster = new MiniDFSCluster.Builder(configuration).numDataNodes(5) + .build(); + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + final FSNamesystem fsn = cluster.getNamesystem(); + + // Create a file and never close the output stream to trigger recovery + FSDataOutputStream out = dfs.create(filePath, (short) numReplicas); + out.write(AppendTestUtil.randomBytes(0, blockSize)); + out.hsync(); + + DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", + cluster.getNameNodePort()), configuration); + LocatedBlock blk = dfsClient.getNamenode(). + getBlockLocations(filename, 0, blockSize). + getLastLocatedBlock(); + + // Kill 2 out of 3 datanodes so that only 1 alive, thus < minReplication + List dataNodes = Arrays.asList(blk.getLocations()); + assertEquals(dataNodes.size(), numReplicas); + for (DatanodeInfo dataNode : dataNodes.subList(0, numReplicas - 1)) { + cluster.stopDataNode(dataNode.getName()); + } + + GenericTestUtils.waitFor(() -> fsn.getNumDeadDataNodes() == 2, + 300, 300000); + + // Make sure hard lease expires to trigger replica recovery + cluster.setLeasePeriod(100L, 100L); + + // Wait for recovery to succeed + GenericTestUtils.waitFor(() -> { + try { + return dfs.isFileClosed(filePath); + } catch (IOException e) { + LOG.info("Something went wrong.", e); + } + return false; + }, 300, 300000); + + // Wait for the block to be replicated + DFSTestUtil.waitForReplication(cluster, DFSTestUtil.getFirstBlock( + dfs, filePath), 1, numReplicas, 0); + + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index ff3b3eabc3b4c..8cbd38bc601dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -28,8 +28,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationException; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -48,8 +46,6 @@ */ public class TestDataNodeReconfiguration { - private static final Logger LOG = - LoggerFactory.getLogger(TestBlockRecovery.class); private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory() + "data"; private final static InetSocketAddress NN_ADDR = new InetSocketAddress( From 1d0d2fecb7e4dedbcb83d1ce14ac037f4a0513e6 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Tue, 6 Apr 2021 01:16:14 +0000 Subject: [PATCH 199/666] YARN-10702. 
Add cluster metric for amount of CPU used by RM Event Processor. Contributed by Jim Brennan. --- .../hadoop/yarn/conf/YarnConfiguration.java | 12 ++ .../hadoop/yarn/event/EventDispatcher.java | 7 ++ .../src/main/resources/yarn-default.xml | 13 +++ .../resourcemanager/ClusterMetrics.java | 29 ++++- .../resourcemanager/ResourceManager.java | 110 +++++++++++++++++- .../webapp/MetricsOverviewTable.java | 5 +- .../webapp/dao/ClusterMetricsInfo.java | 15 ++- .../resourcemanager/webapp/TestNodesPage.java | 2 +- .../webapp/TestRMWebServices.java | 2 +- 9 files changed, 186 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 2103ae77c2f64..833aeccd0bdf7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2927,6 +2927,18 @@ public static boolean isAclEnabled(Configuration conf) { public static final int DEFAULT_YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD = 5000; + /** Resource manager dispatcher thread monitor sampling rate. + * Units are samples per minute. This controls how often to sample + * the cpu utilization of the resource manager dispatcher thread. + * The cpu utilization is displayed on the RM UI as scheduler busy %. + * Set to zero to disable the dispatcher thread monitor. + */ + public static final String + YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN = + YARN_PREFIX + "dispatcher.cpu-monitor.samples-per-min"; + public static final int + DEFAULT_YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN = 60; + /** * CLASSPATH for YARN applications. A comma-separated list of CLASSPATH * entries diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index f51bae63dc6ec..11cdf150ddb74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -154,4 +154,11 @@ public void setMetrics(EventTypeMetrics metrics) { this.metrics = metrics; } + protected long getEventProcessorId() { + return this.eventProcessor.getId(); + } + + protected boolean isStopped() { + return this.stopped; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index c51cf8ac3fa5d..0e9c00beeadc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -120,6 +120,19 @@ 5000 + + + Resource manager dispatcher thread cpu monitor sampling rate. + Units are samples per minute. This controls how often to sample + the cpu utilization of the resource manager dispatcher thread. + The cpu utilization is displayed on the RM UI as scheduler busy %. + Set this to zero to disable the dispatcher thread monitor. Defaults + to 60 samples per minute. 
+ + yarn.dispatcher.cpu-monitor.samples-per-min + 60 + + The expiry interval for application master reporting. yarn.am.liveness-monitor.expiry-interval-ms diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java index 95ef7a6a69f2f..a02eeef28b7ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java @@ -62,6 +62,12 @@ public class ClusterMetrics { @Metric("Vcore Utilization") MutableGaugeLong utilizedVirtualCores; @Metric("Memory Capability") MutableGaugeLong capabilityMB; @Metric("Vcore Capability") MutableGaugeLong capabilityVirtualCores; + @Metric("RM Event Processor CPU Usage 60 second Avg") MutableGaugeLong + rmEventProcCPUAvg; + @Metric("RM Event Processor CPU Usage 60 second Max") MutableGaugeLong + rmEventProcCPUMax; + + private boolean rmEventProcMonitorEnable = false; private static final MetricsInfo RECORD_INFO = info("ClusterMetrics", "Metrics for the Yarn Cluster"); @@ -118,6 +124,27 @@ public synchronized static void destroy() { INSTANCE = null; } + // Indicate whether RM Event Thread CPU Monitor is enabled + public void setRmEventProcMonitorEnable(boolean value) { + rmEventProcMonitorEnable = value; + } + public boolean getRmEventProcMonitorEnable() { + return rmEventProcMonitorEnable; + } + // RM Event Processor CPU Usage + public long getRmEventProcCPUAvg() { + return rmEventProcCPUAvg.value(); + } + public void setRmEventProcCPUAvg(long value) { + rmEventProcCPUAvg.set(value); + } + public long getRmEventProcCPUMax() { + return rmEventProcCPUMax.value(); + } + public void setRmEventProcCPUMax(long value) { + rmEventProcCPUMax.set(value); + } + //Active Nodemanagers public int getNumActiveNMs() { return numActiveNMs.value(); @@ -292,4 +319,4 @@ public long getUtilizedVirtualCores() { public void incrUtilizedVirtualCores(long delta) { utilizedVirtualCores.incr(delta); } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 6d2a9fed08b91..9b47431d76dc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -49,8 +49,9 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.Time; import org.apache.hadoop.util.curator.ZKCuratorManager; +import org.apache.hadoop.util.VersionInfo; import 
org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -138,6 +139,8 @@ import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadMXBean; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; @@ -451,8 +454,19 @@ protected void setRMStateStore(RMStateStore rmStore) { } protected EventHandler createSchedulerEventDispatcher() { - EventDispatcher dispatcher = new - EventDispatcher(this.scheduler, "SchedulerEventDispatcher"); + String dispatcherName = "SchedulerEventDispatcher"; + EventDispatcher dispatcher; + int threadMonitorRate = conf.getInt( + YarnConfiguration.YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN, + YarnConfiguration.DEFAULT_YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN); + + if (threadMonitorRate > 0) { + dispatcher = new SchedulerEventDispatcher(dispatcherName, + threadMonitorRate); + ClusterMetrics.getMetrics().setRmEventProcMonitorEnable(true); + } else { + dispatcher = new EventDispatcher(this.scheduler, dispatcherName); + } dispatcher. setMetrics(GenericEventTypeMetricsManager. create(dispatcher.getName(), SchedulerEventType.class)); @@ -1018,7 +1032,95 @@ public void handle(RMFatalEvent event) { } } - /** + @Private + private class SchedulerEventDispatcher extends + EventDispatcher { + + private final Thread eventProcessorMonitor; + + SchedulerEventDispatcher(String name, int samplesPerMin) { + super(scheduler, name); + this.eventProcessorMonitor = + new Thread(new EventProcessorMonitor(getEventProcessorId(), + samplesPerMin)); + this.eventProcessorMonitor + .setName("ResourceManager Event Processor Monitor"); + } + // EventProcessorMonitor keeps track of how much CPU the EventProcessor + // thread is using. It takes a configurable number of samples per minute, + // and then reports the Avg and Max of previous 60 seconds as cluster + // metrics. Units are usecs per second of CPU used. + // Avg is not accurate until one minute of samples have been received. 
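The monitor described in the comment above derives a utilization figure from ThreadMXBean CPU-time deltas taken over wall-clock intervals. As a point of reference, here is a minimal, self-contained sketch of that sampling idea outside the dispatcher; the demo class name, the choice to sample the current thread, and the busy loop are illustrative assumptions and are not part of this patch.

// Standalone sketch (not part of the patch): sample a thread's CPU use the
// same way the dispatcher monitor does, via ThreadMXBean deltas.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class ThreadCpuSampleSketch {
  public static void main(String[] args) throws InterruptedException {
    ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
    if (!tmxb.isThreadCpuTimeSupported()) {
      System.out.println("Thread CPU time is not supported on this JVM");
      return;
    }
    long tid = Thread.currentThread().getId();   // hypothetical target: sample this thread
    long cpuBefore = tmxb.getThreadCpuTime(tid); // CPU time used so far, in nanoseconds
    long wallBefore = System.nanoTime();

    long acc = 0;
    for (int i = 0; i < 50_000_000; i++) {       // burn some CPU so the sample is non-zero
      acc += i;
    }
    Thread.sleep(200);                           // idle time pulls the utilization figure down

    long cpuDeltaNs = tmxb.getThreadCpuTime(tid) - cpuBefore;
    long wallDeltaMs = (System.nanoTime() - wallBefore) / 1_000_000L;
    // Nanoseconds of CPU divided by milliseconds of wall clock gives microseconds
    // of CPU per second, the same unit convention the monitor reports.
    System.out.println("usec CPU per second: " + (cpuDeltaNs / wallDeltaMs)
        + " (ignore: " + acc + ")");
  }
}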
+ private final class EventProcessorMonitor implements Runnable { + private final long tid; + private final boolean run; + private final ThreadMXBean tmxb; + private final ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); + private final int samples; + EventProcessorMonitor(long id, int samplesPerMin) { + assert samplesPerMin > 0; + this.tid = id; + this.samples = samplesPerMin; + this.tmxb = ManagementFactory.getThreadMXBean(); + if (clusterMetrics != null && + tmxb != null && tmxb.isThreadCpuTimeSupported()) { + this.run = true; + clusterMetrics.setRmEventProcMonitorEnable(true); + } else { + this.run = false; + } + } + public void run() { + int index = 0; + long[] values = new long[samples]; + int sleepMs = (60 * 1000) / samples; + + while (run && !isStopped() && !Thread.currentThread().isInterrupted()) { + try { + long cpuBefore = tmxb.getThreadCpuTime(tid); + long wallClockBefore = Time.monotonicNow(); + Thread.sleep(sleepMs); + long wallClockDelta = Time.monotonicNow() - wallClockBefore; + long cpuDelta = tmxb.getThreadCpuTime(tid) - cpuBefore; + + // Nanoseconds / Milliseconds = usec per second + values[index] = cpuDelta / wallClockDelta; + + index = (index + 1) % samples; + long max = 0; + long sum = 0; + for (int i = 0; i < samples; i++) { + sum += values[i]; + max = Math.max(max, values[i]); + } + clusterMetrics.setRmEventProcCPUAvg(sum / samples); + clusterMetrics.setRmEventProcCPUMax(max); + } catch (InterruptedException e) { + LOG.error("Returning, interrupted : " + e); + return; + } + } + } + } + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + this.eventProcessorMonitor.start(); + } + + @Override + protected void serviceStop() throws Exception { + super.serviceStop(); + this.eventProcessorMonitor.interrupt(); + try { + this.eventProcessorMonitor.join(); + } catch (InterruptedException e) { + throw new YarnRuntimeException(e); + } + } + } + + /** * Transition to standby state in a new thread. The transition operation is * asynchronous to avoid deadlock caused by cyclic dependency. */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index 009a012e3bcd0..3ce4f2b51859c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -204,7 +204,8 @@ protected void render(Block html) { } SchedulerInfo schedulerInfo = new SchedulerInfo(this.rm); - + int schedBusy = clusterMetrics.getRmSchedulerBusyPercent(); + div.h3("Scheduler Metrics"). table("#schedulermetricsoverview"). thead().$class("ui-widget-header"). @@ -215,6 +216,7 @@ protected void render(Block html) { th().$class("ui-state-default").__("Maximum Allocation").__(). th().$class("ui-state-default") .__("Maximum Cluster Application Priority").__(). + th().$class("ui-state-default").__("Scheduler Busy %").__(). __(). __(). tbody().$class("ui-widget-content"). 
@@ -225,6 +227,7 @@ protected void render(Block html) { td(schedulerInfo.getMinAllocation().toString()). td(schedulerInfo.getMaxAllocation().toString()). td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())). + td(schedBusy == -1 ? UNAVAILABLE : String.valueOf(schedBusy)). __(). __().__(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index b66c4d997a79e..779d233042637 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -57,6 +57,7 @@ public class ClusterMetricsInfo { private long totalVirtualCores; private int utilizedMBPercent; private int utilizedVirtualCoresPercent; + private int rmSchedulerBusyPercent; private int totalNodes; private int lostNodes; private int unhealthyNodes; @@ -143,7 +144,11 @@ public ClusterMetricsInfo(final ResourceScheduler rs) { this.utilizedVirtualCoresPercent = baseCores <= 0 ? 0 : (int) (clusterMetrics.getUtilizedVirtualCores() * 100 / baseCores); - + // Scheduler Busy is in usec per sec, so to get percent divide by 10^4 + // Set to -1 if disabled. + this.rmSchedulerBusyPercent = + clusterMetrics.getRmEventProcMonitorEnable() ? + (int)(clusterMetrics.getRmEventProcCPUAvg() / 10000L) : -1; this.activeNodes = clusterMetrics.getNumActiveNMs(); this.lostNodes = clusterMetrics.getNumLostNMs(); this.unhealthyNodes = clusterMetrics.getUnhealthyNMs(); @@ -271,6 +276,10 @@ public int getUtilizedVirtualCoresPercent() { return utilizedVirtualCoresPercent; } + public int getRmSchedulerBusyPercent() { + return rmSchedulerBusyPercent; + } + public void setContainersReserved(int containersReserved) { this.containersReserved = containersReserved; } @@ -383,6 +392,10 @@ public void setUtilizedVirtualCoresPercent(int utilizedVirtualCoresPercent) { this.utilizedVirtualCoresPercent = utilizedVirtualCoresPercent; } + public void setRmSchedulerBusyPercent(int rmSchedulerBusyPercent) { + this.rmSchedulerBusyPercent = rmSchedulerBusyPercent; + } + public ResourceInfo getTotalClusterResourcesAcrossPartition() { return totalClusterResourcesAcrossPartition; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index 9b79938d37238..9ab6583b06ce4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -52,7 +52,7 @@ public class TestNodesPage { // Number of Actual Table Headers for NodesPage.NodesBlock might change in // 
future. In that case this value should be adjusted to the new value. - private final int numberOfThInMetricsTable = 22; + private final int numberOfThInMetricsTable = 23; private final int numberOfActualTableHeaders = 18; private final int numberOfThForOpportunisticContainers = 4; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index 5785b1411443a..02094327f82bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -474,7 +474,7 @@ public void verifyClusterMetricsJSON(JSONObject json) throws JSONException, Exception { assertEquals("incorrect number of elements", 1, json.length()); JSONObject clusterinfo = json.getJSONObject("clusterMetrics"); - assertEquals("incorrect number of elements", 31, clusterinfo.length()); + assertEquals("incorrect number of elements", 32, clusterinfo.length()); verifyClusterMetrics( clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"), clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"), From 678949e0b27eb3ffb58d0b39c303cc5dc185a476 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 6 Apr 2021 10:09:10 +0530 Subject: [PATCH 200/666] HADOOP-17622. Avoid usage of deprecated IOUtils#cleanup API. 
(#2862) Signed-off-by: Takanobu Asanuma --- .../org/apache/hadoop/fs/TestLocalFileSystem.java | 2 +- .../apache/hadoop/metrics2/sink/TestFileSink.java | 2 +- .../apache/hadoop/net/unix/TestDomainSocket.java | 2 +- .../hdfs/server/datanode/BPServiceActor.java | 6 +++--- .../hadoop/hdfs/server/datanode/BlockReceiver.java | 2 +- .../hadoop/hdfs/server/datanode/BlockScanner.java | 2 +- .../hadoop/hdfs/server/datanode/DataXceiver.java | 4 ++-- .../hadoop/hdfs/server/datanode/VolumeScanner.java | 8 ++++---- .../datanode/checker/DatasetVolumeChecker.java | 6 +++--- .../datanode/erasurecode/StripedBlockReader.java | 2 +- .../datanode/fsdataset/ReplicaInputStreams.java | 4 ++-- .../fsdataset/impl/FsDatasetAsyncDiskService.java | 2 +- .../datanode/fsdataset/impl/FsDatasetImpl.java | 14 +++++++------- .../datanode/fsdataset/impl/FsDatasetUtil.java | 2 +- .../datanode/fsdataset/impl/FsVolumeList.java | 2 +- .../hdfs/server/namenode/EditsDoubleBuffer.java | 2 +- .../org/apache/hadoop/hdfs/tools/DebugAdmin.java | 4 ++-- .../offlineImageViewer/PBImageTextWriter.java | 10 +++++----- .../hadoop/fs/TestEnhancedByteBufferAccess.java | 2 +- .../java/org/apache/hadoop/fs/TestUnbuffer.java | 4 ++-- .../apache/hadoop/fs/permission/TestStickyBit.java | 6 +++--- .../hadoop/hdfs/TestDataTransferKeepalive.java | 2 +- .../java/org/apache/hadoop/hdfs/TestHFlush.java | 2 +- .../java/org/apache/hadoop/hdfs/TestPread.java | 4 ++-- .../org/apache/hadoop/hdfs/TestRollingUpgrade.java | 2 +- .../hdfs/TestSecureEncryptionZoneWithKMS.java | 2 +- .../hdfs/TestTrashWithSecureEncryptionZones.java | 2 +- .../hadoop/hdfs/TestWriteConfigurationToDFS.java | 2 +- .../datatransfer/sasl/TestSaslDataTransfer.java | 2 +- .../apache/hadoop/hdfs/qjournal/QJMTestUtil.java | 2 +- .../hadoop/hdfs/qjournal/TestSecureNNWithQJM.java | 4 ++-- .../hdfs/server/datanode/TestBlockReplacement.java | 2 +- .../hdfs/server/datanode/TestCachingStrategy.java | 2 +- .../hadoop/hdfs/server/mover/TestStorageMover.java | 2 +- .../hadoop/hdfs/server/namenode/FSAclBaseTest.java | 3 ++- .../hdfs/server/namenode/FSXAttrBaseTest.java | 2 +- .../hdfs/server/namenode/TestAclConfigFlag.java | 2 +- .../hdfs/server/namenode/TestAddStripedBlocks.java | 6 +++--- .../TestCommitBlockWithInvalidGenStamp.java | 2 +- .../hadoop/hdfs/server/namenode/TestEditLog.java | 6 ++++-- .../namenode/TestEditLogFileOutputStream.java | 2 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 2 +- .../hadoop/hdfs/server/namenode/TestMetaSave.java | 4 ++-- .../namenode/TestQuotaWithStripedBlocks.java | 2 +- .../hdfs/server/namenode/TestXAttrConfigFlag.java | 2 +- .../server/namenode/ha/TestRetryCacheWithHA.java | 2 +- .../namenode/snapshot/TestAclWithSnapshot.java | 2 +- .../namenode/snapshot/TestXAttrWithSnapshot.java | 2 +- .../hdfs/util/TestAtomicFileOutputStream.java | 2 +- .../apache/hadoop/hdfs/web/TestWebHDFSForHA.java | 12 ++++++------ .../org/apache/hadoop/tools/TestDistCpSync.java | 2 +- .../hadoop/tools/TestDistCpSyncReverseBase.java | 2 +- .../apache/hadoop/tools/TestDistCpWithAcls.java | 2 +- .../hadoop/tools/TestDistCpWithRawXAttrs.java | 2 +- .../apache/hadoop/tools/TestDistCpWithXAttrs.java | 2 +- .../hadoop/tools/TestGlobbedCopyListing.java | 4 ++-- .../apache/hadoop/tools/mapred/TestCopyMapper.java | 2 +- .../tools/mapred/TestUniformSizeInputFormat.java | 2 +- .../tools/mapred/lib/TestDynamicInputFormat.java | 2 +- .../hadoop/mapred/gridmix/ReadRecordFactory.java | 2 +- .../hadoop/mapred/gridmix/ReplayJobFactory.java | 2 +- .../hadoop/mapred/gridmix/SerialJobFactory.java | 2 +- 
.../hadoop/mapred/gridmix/StressJobFactory.java | 2 +- .../java/org/apache/hadoop/tools/rumen/Folder.java | 2 +- 64 files changed, 102 insertions(+), 99 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 1384bb6a17f38..e7b72a93f3378 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -312,7 +312,7 @@ public void testHasFileDescriptor() throws IOException { .new LocalFSFileInputStream(path), 1024); assertNotNull(bis.getFileDescriptor()); } finally { - IOUtils.cleanup(null, bis); + IOUtils.cleanupWithLogger(null, bis); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java index b20653e6b204b..67889405c1068 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java @@ -115,7 +115,7 @@ public void testFileSink() throws IOException { IOUtils.copyBytes(is, baos, 1024, true); outFileContent = new String(baos.toByteArray(), "UTF-8"); } finally { - IOUtils.cleanup(null, baos, is); + IOUtils.cleanupWithLogger(null, baos, is); } // Check the out file content. Should be something like the following: diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index 466c83eb58212..61cbd85f8d69f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -759,6 +759,6 @@ public void run() { readerThread.join(); Assert.assertFalse(failed.get()); Assert.assertEquals(3, bytesRead.get()); - IOUtils.cleanup(null, socks); + IOUtils.cleanupWithLogger(null, socks); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 3a5436196e834..d3212b6384eaa 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -618,8 +618,8 @@ void join() { private synchronized void cleanUp() { shouldServiceRun = false; - IOUtils.cleanup(null, bpNamenode); - IOUtils.cleanup(null, lifelineSender); + IOUtils.cleanupWithLogger(null, bpNamenode); + IOUtils.cleanupWithLogger(null, lifelineSender); bpos.shutdownActor(this); } @@ -992,7 +992,7 @@ public void close() { } catch (InterruptedException e) { Thread.currentThread().interrupt(); } - IOUtils.cleanup(null, lifelineNamenode); + IOUtils.cleanupWithLogger(null, lifelineNamenode); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 3f1773ccc79e3..52f25d7fb0bff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -369,7 +369,7 @@ public void close() throws IOException { streams.close(); } if (replicaHandler != null) { - IOUtils.cleanup(null, replicaHandler); + IOUtils.cleanupWithLogger(null, replicaHandler); replicaHandler = null; } if (measuredFlushTime) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java index 485cf00152a47..6dcfad418474a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java @@ -252,7 +252,7 @@ public synchronized void addVolumeScanner(FsVolumeReference ref) { if (!success) { // If we didn't create a new VolumeScanner object, we don't // need this reference to the volume. - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 278ee098362a7..c43fc3dae0ee1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -432,7 +432,7 @@ public void requestShortCircuitFds(final ExtendedBlock blk, blk.getBlockId(), dnR.getDatanodeUuid(), success)); } if (fis != null) { - IOUtils.cleanup(null, fis); + IOUtils.cleanupWithLogger(null, fis); } } } @@ -555,7 +555,7 @@ public void requestShortCircuitShm(String clientName) throws IOException { LOG.warn("Failed to shut down socket in error handler", e); } } - IOUtils.cleanup(null, shmInfo); + IOUtils.cleanupWithLogger(null, shmInfo); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index 6bc25eb24a675..0367b4a7aa3c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -451,7 +451,7 @@ private long scanBlock(ExtendedBlock cblock, long bytesPerSec) { } catch (IOException e) { resultHandler.handle(block, e); } finally { - IOUtils.cleanup(null, blockSender); + IOUtils.cleanupWithLogger(null, blockSender); } metrics.incrBlockVerificationFailures(); return -1; @@ -674,13 +674,13 @@ public void run() { // Save the current position of all block iterators and close them. for (BlockIterator iter : blockIters) { saveBlockIterator(iter); - IOUtils.cleanup(null, iter); + IOUtils.cleanupWithLogger(null, iter); } } finally { VolumeScannerCBInjector.get().terminationCallBack(this); // When the VolumeScanner exits, release the reference we were holding // on the volume. 
This will allow the volume to be removed later. - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); } } @@ -767,7 +767,7 @@ public synchronized void disableBlockPoolId(String bpid) { if (iter.getBlockPoolId().equals(bpid)) { LOG.trace("{}: disabling scanning on block pool {}", this, bpid); i.remove(); - IOUtils.cleanup(null, iter); + IOUtils.cleanupWithLogger(null, iter); if (curBlockIter == iter) { curBlockIter = null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index d077d215a5750..997a6d972224c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -234,7 +234,7 @@ public void call(Set ignored1, } }), MoreExecutors.directExecutor()); } else { - IOUtils.cleanup(null, reference); + IOUtils.cleanupWithLogger(null, reference); if (numVolumes.decrementAndGet() == 0) { latch.countDown(); } @@ -311,7 +311,7 @@ public boolean checkVolume( ); return true; } else { - IOUtils.cleanup(null, volumeReference); + IOUtils.cleanupWithLogger(null, volumeReference); } return false; } @@ -404,7 +404,7 @@ private void markFailed() { } private void cleanup() { - IOUtils.cleanup(null, reference); + IOUtils.cleanupWithLogger(null, reference); invokeCallback(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java index b1ad03f28dbbf..54302e3c2561d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java @@ -158,7 +158,7 @@ private Peer newConnectedPeer(ExtendedBlock b, InetSocketAddress addr, return peer; } finally { if (!success) { - IOUtils.cleanup(null, peer); + IOUtils.cleanupWithLogger(null, peer); IOUtils.closeSocket(sock); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java index f40315a6da013..f8bd8c03e19a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java @@ -130,7 +130,7 @@ public void closeStreams() throws IOException { dataInFd = null; } if (volumeRef != null) { - IOUtils.cleanup(null, volumeRef); + IOUtils.cleanupWithLogger(null, volumeRef); volumeRef = null; } // throw IOException if there is any @@ -146,7 +146,7 @@ public void close() { dataInFd = null; IOUtils.closeStream(checksumIn); checksumIn = null; - IOUtils.cleanup(null, volumeRef); + IOUtils.cleanupWithLogger(null, volumeRef); volumeRef = null; } } \ No newline at end of file diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index 81213a033f00f..2a89a80d17a25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -335,7 +335,7 @@ public void run() { + block.getLocalBlock() + " URI " + replicaToDelete.getBlockURI()); } updateDeletedBlockId(block); - IOUtils.cleanup(null, volumeRef); + IOUtils.cleanupWithLogger(null, volumeRef); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 41791bb31a500..d06d3cfec6697 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -949,11 +949,11 @@ public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, return new ReplicaInputStreams( blockInStream, metaInStream, ref, datanode.getFileIoProvider()); } catch (IOException e) { - IOUtils.cleanup(null, blockInStream); + IOUtils.cleanupWithLogger(null, blockInStream); throw e; } } catch (IOException e) { - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); throw e; } } @@ -1421,7 +1421,7 @@ public ReplicaHandler append(ExtendedBlock b, replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes()); } catch (IOException e) { - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); throw e; } return new ReplicaHandler(replica, ref); @@ -1553,7 +1553,7 @@ public ReplicaHandler recoverAppend( replica = (ReplicaInPipeline) replicaInfo; } } catch (IOException e) { - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); throw e; } return new ReplicaHandler(replica, ref); @@ -1648,7 +1648,7 @@ public ReplicaHandler createRbw( + " for block " + b.getBlockId()); } } catch (IOException e) { - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); throw e; } @@ -1752,7 +1752,7 @@ private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw, // bump the replica's generation stamp to newGS rbw.getReplicaInfo().bumpReplicaGS(newGS); } catch (IOException e) { - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); throw e; } return new ReplicaHandler(rbw, ref); @@ -1905,7 +1905,7 @@ public ReplicaHandler createTemporary(StorageType storageType, try { newReplicaInfo = v.createTemporary(b); } catch (IOException e) { - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); throw e; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java index 812a7bf6e7451..621c2735a267c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java @@ -117,7 +117,7 @@ public static FileDescriptor openAndSeek(File file, long offset) } return raf.getFD(); } catch(IOException ioe) { - IOUtils.cleanup(null, raf); + IOUtils.cleanupWithLogger(null, raf); throw ioe; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index a87f13c71ce8c..9400c7c7f4ca1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -357,7 +357,7 @@ void addVolume(FsVolumeReference ref) throws IOException { } else { // If the volume is not put into a volume scanner, it does not need to // hold the reference. - IOUtils.cleanup(null, ref); + IOUtils.cleanupWithLogger(null, ref); } // If the volume is used to replace a failed volume, it needs to reset the // volume failure info for this volume. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index affba028c08cd..ccc233efcbafe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -76,7 +76,7 @@ public void close() throws IOException { + " bytes still to be flushed and cannot be closed."); } - IOUtils.cleanup(null, bufCurrent, bufReady); + IOUtils.cleanupWithLogger(null, bufCurrent, bufReady); bufCurrent = bufReady = null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java index 1784ea2accf6f..f5967e15a63d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java @@ -202,7 +202,7 @@ int run(List args) throws IOException { blockFile); return 0; } finally { - IOUtils.cleanup(null, metaStream, dataStream, checksumStream); + IOUtils.cleanupWithLogger(null, metaStream, dataStream, checksumStream); } } } @@ -287,7 +287,7 @@ int run(List args) throws IOException { + " saved metadata to meta file " + outFile); return 0; } finally { - IOUtils.cleanup(null, metaOut); + IOUtils.cleanupWithLogger(null, metaOut); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java index cd4047d9e646c..ccab7b0c6957f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java @@ -319,10 +319,10 @@ private static class LevelDBStore implements Closeable { @Override public void close() throws IOException { 
if (batch != null) { - IOUtils.cleanup(null, batch); + IOUtils.cleanupWithLogger(null, batch); batch = null; } - IOUtils.cleanup(null, db); + IOUtils.cleanupWithLogger(null, db); db = null; } @@ -388,13 +388,13 @@ protected boolean removeEldestEntry(Map.Entry entry) { dirMap = new LevelDBStore(new File(dbDir, "dirMap")); } catch (IOException e) { LOG.error("Failed to open LevelDBs", e); - IOUtils.cleanup(null, this); + IOUtils.cleanupWithLogger(null, this); } } @Override public void close() throws IOException { - IOUtils.cleanup(null, dirChildMap, dirMap); + IOUtils.cleanupWithLogger(null, dirChildMap, dirMap); dirChildMap = null; dirMap = null; } @@ -515,7 +515,7 @@ public long getParentId(long id) throws IOException { @Override public void close() throws IOException { out.flush(); - IOUtils.cleanup(null, metadataMap); + IOUtils.cleanupWithLogger(null, metadataMap); } void append(StringBuffer buffer, int field) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index f4c264c5e5d40..99b1ddbbc1130 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -841,7 +841,7 @@ public void test2GBMmapLimit() throws Exception { if (buf2 != null) { fsIn2.releaseBuffer(buf2); } - IOUtils.cleanup(null, fsIn, fsIn2); + IOUtils.cleanupWithLogger(null, fsIn, fsIn2); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java index ef4c04d0f20f6..feb77f8457c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java @@ -87,7 +87,7 @@ public void testUnbufferClosesSockets() throws Exception { Assert.assertEquals(b, b2); } finally { if (stream != null) { - IOUtils.cleanup(null, stream); + IOUtils.cleanupWithLogger(null, stream); } if (cluster != null) { cluster.shutdown(); @@ -122,7 +122,7 @@ public void testOpenManyFilesViaTcp() throws Exception { } } finally { for (FSDataInputStream stream : streams) { - IOUtils.cleanup(null, stream); + IOUtils.cleanupWithLogger(null, stream); } if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java index a6409fde6cd2d..96d15e59a52ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java @@ -96,7 +96,7 @@ public void setup() throws Exception { @AfterClass public static void shutdown() throws Exception { - IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2); + IOUtils.cleanupWithLogger(null, hdfs, hdfsAsUser1, hdfsAsUser2); if (cluster != null) { cluster.shutdown(); } @@ -121,7 +121,7 @@ private void confirmCanAppend(Configuration conf, Path p) throws Exception { h.close(); h = null; } finally { - IOUtils.cleanup(null, h); + IOUtils.cleanupWithLogger(null, h); } } @@ -500,7 +500,7 @@ static private 
void writeFile(FileSystem hdfs, Path p) throws IOException { o.close(); o = null; } finally { - IOUtils.cleanup(null, o); + IOUtils.cleanupWithLogger(null, o); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java index 0bf21ee1d6219..9881f9250b2ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -227,7 +227,7 @@ public void testManyClosedSocketsInCache() throws Exception { IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024); } } finally { - IOUtils.cleanup(null, stms); + IOUtils.cleanupWithLogger(null, stms); } assertEquals(5, peerCache.size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java index d700765bd0336..711291c4051f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java @@ -177,7 +177,7 @@ public void hSyncEndBlock_00() throws IOException { blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0); assertEquals(3, blocks.getLocatedBlocks().size()); } finally { - IOUtils.cleanup(null, stm, fileSystem); + IOUtils.cleanupWithLogger(null, stm, fileSystem); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index ac0994d88936c..ac3c122aac89d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -363,8 +363,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { assertTrue(false); } finally { Mockito.reset(injector); - IOUtils.cleanup(null, input); - IOUtils.cleanup(null, output); + IOUtils.cleanupWithLogger(null, input); + IOUtils.cleanupWithLogger(null, output); fileSys.close(); cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java index 46fab472298d9..a716335d1aa0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java @@ -741,7 +741,7 @@ public void testCheckpointWithSNN() throws Exception { // do checkpoint in SNN again snn.doCheckpoint(); } finally { - IOUtils.cleanup(null, dfs); + IOUtils.cleanupWithLogger(null, dfs); if (snn != null) { snn.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java index db97c02e0920b..fcf1333054a4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java @@ -273,7 +273,7 @@ public void setup() throws Exception { @After public void shutdown() throws IOException { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java index cce145454578c..47288847519d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java @@ -229,7 +229,7 @@ public static void init() throws Exception { @AfterClass public static void destroy() { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java index 5503238330ace..52abbe9cd3066 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java @@ -54,7 +54,7 @@ public void testWriteConf() throws Exception { fs.close(); fs = null; } finally { - IOUtils.cleanup(null, os, fs); + IOUtils.cleanupWithLogger(null, os, fs); cluster.shutdown(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java index d6612c1c709b4..3dd0b7eb99ea1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java @@ -256,7 +256,7 @@ public DataEncryptionKey newDataEncryptionKey() { } catch (SocketTimeoutException e) { GenericTestUtils.assertExceptionContains("Read timed out", e); } finally { - IOUtils.cleanup(null, socket, serverSocket); + IOUtils.cleanupWithLogger(null, socket, serverSocket); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java index 539f21647a8f0..91680578c6ff5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java @@ -173,7 +173,7 @@ public static long recoverAndReturnLastTxn(QuorumJournalManager qjm) lastRecoveredTxn = elis.getLastTxId(); } } finally { - IOUtils.cleanup(null, streams.toArray(new Closeable[0])); + IOUtils.cleanupWithLogger(null, streams.toArray(new Closeable[0])); } return lastRecoveredTxn; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java index 35bae49bbc4bf..eb64cf57dd7c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java @@ -163,7 +163,7 @@ public void setup() throws Exception { @After public void shutdown() throws IOException { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); cluster = null; @@ -213,7 +213,7 @@ private void doNNWithQJMTest() throws IOException { * @throws IOException if there is an I/O error */ private void restartNameNode() throws IOException { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); cluster.restartNameNode(); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java index 67b41f806125d..3f99f1bb12eb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java @@ -479,7 +479,7 @@ public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception { assertEquals("The block should be only on 1 datanode ", 1, locatedBlocks1.get(0).getLocations().length); } finally { - IOUtils.cleanup(null, client); + IOUtils.cleanupWithLogger(null, client); cluster.shutdown(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java index 459ef88768c1d..011df46900c22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java @@ -332,7 +332,7 @@ public void testFadviseSkippedForSmallReads() throws Exception { stats = tracker.getStats(fadvisedFileName); stats.assertNotDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE); } finally { - IOUtils.cleanup(null, fis); + IOUtils.cleanupWithLogger(null, fis); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index fec04be65b24a..d95e76fc85294 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -230,7 +230,7 @@ private void runBasicTest(boolean shutdown) throws Exception { } void shutdownCluster() throws Exception { - IOUtils.cleanup(null, dfs); + IOUtils.cleanupWithLogger(null, dfs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index 8a2c0e206fe88..3a9ad2573b841 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -108,7 +108,8 @@ public void setUp() throws Exception { @After public void destroyFileSystems() { - IOUtils.cleanup(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember); + IOUtils.cleanupWithLogger(null, fs, fsAsBruce, fsAsDiana, + fsAsSupergroupMember); fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = fsAsBob = null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index 019531c4de736..44e86ae66a87d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -127,7 +127,7 @@ public void setUp() throws Exception { @After public void destroyFileSystems() { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); fs = null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java index f45728f167d46..8e01f732483a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java @@ -55,7 +55,7 @@ public class TestAclConfigFlag { @After public void shutdown() throws Exception { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index d17a36f8ba2c3..ef91fc3a0e9f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -206,7 +206,7 @@ public void testAddStripedBlock() throws Exception { assertTrue(blocks[0].isStriped()); checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false); } finally { - IOUtils.cleanup(null, out); + IOUtils.cleanupWithLogger(null, out); } } @@ -271,7 +271,7 @@ public void testGetLocatedStripedBlocks() throws Exception { assertArrayEquals(indices, blockIndices); assertArrayEquals(expectedDNs, datanodes); } finally { - IOUtils.cleanup(null, out); + IOUtils.cleanupWithLogger(null, out); } } @@ -327,7 +327,7 @@ public void testAddUCReplica() throws Exception { assertTrue(storageIDs.contains(newstorage.getStorageID())); } } finally { - IOUtils.cleanup(null, out); + IOUtils.cleanupWithLogger(null, out); } // 3. restart the namenode. 
mimic the full block reports and check the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java index 77a32680f3207..f7b1ea5bac351 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java @@ -95,7 +95,7 @@ public void testCommitWithInvalidGenStamp() throws Exception { dfs.getClient().getClientName(), previous, fileNode.getId()); Assert.assertTrue("should complete successfully", complete); } finally { - IOUtils.cleanup(null, out); + IOUtils.cleanupWithLogger(null, out); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index dbf65fadb101b..ae27730d9ce8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -1548,7 +1548,8 @@ public boolean accept(File dir, String name) { LOG.error("edit log failover didn't work", e); fail("Edit log failover didn't work"); } finally { - IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0])); + IOUtils.cleanupWithLogger(null, + streams.toArray(new EditLogInputStream[0])); } } @@ -1598,7 +1599,8 @@ public boolean accept(File dir, String name) { LOG.error("edit log failover didn't work", e); fail("Edit log failover didn't work"); } finally { - IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0])); + IOUtils.cleanupWithLogger(null, + streams.toArray(new EditLogInputStream[0])); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java index 67b48b10cd6cb..9ea4548fb5107 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java @@ -154,7 +154,7 @@ public void testEditLogFileOutputStreamAbortAbort() throws IOException { editLogStream.abort(); editLogStream.abort(); } finally { - IOUtils.cleanup(null, editLogStream); + IOUtils.cleanupWithLogger(null, editLogStream); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 2c9075e19376b..b27a00d257f89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -568,7 +568,7 @@ public void checkSalvagedRemains() throws IOException { } } } finally { - IOUtils.cleanup(null, in); + IOUtils.cleanupWithLogger(null, in); } } } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 0c9b499ddeba7..c88570b56e0e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -212,7 +212,7 @@ public void testMetaSaveOverwrite() throws Exception { line = rdr.readLine(); } } finally { - IOUtils.cleanup(null, rdr, isr, fis); + IOUtils.cleanupWithLogger(null, rdr, isr, fis); } } @@ -271,7 +271,7 @@ public void testConcurrentMetaSave() throws Exception { line = rdr.readLine(); } } finally { - IOUtils.cleanup(null, rdr, isr, fis); + IOUtils.cleanupWithLogger(null, rdr, isr, fis); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java index 38b98a40e2617..e89ed8dbb6bc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java @@ -140,7 +140,7 @@ public void testUpdatingQuotaCount() throws Exception { Assert.assertEquals(cellSize * groupSize, actualDiskUsed); } finally { - IOUtils.cleanup(null, out); + IOUtils.cleanupWithLogger(null, out); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java index 5064a0339d278..c5278371c8bc7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java @@ -48,7 +48,7 @@ public class TestXAttrConfigFlag { @After public void shutdown() throws Exception { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 90742479c162a..e3e934b5e3776 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -1315,7 +1315,7 @@ public void run() { } catch (Exception e) { LOG.info("Got Exception while calling " + op.name, e); } finally { - IOUtils.cleanup(null, op.client); + IOUtils.cleanupWithLogger(null, op.client); } } }.start(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java index ea9c5e2bb1437..511607e1174d0 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -86,7 +86,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() throws Exception { - IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana); + IOUtils.cleanupWithLogger(null, hdfs, fsAsBruce, fsAsDiana); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java index 4b957bf9365d4..2c93e12bc026d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java @@ -79,7 +79,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() throws Exception { - IOUtils.cleanup(null, hdfs); + IOUtils.cleanupWithLogger(null, hdfs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java index bce92ef797da2..144f990ce9ebd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java @@ -142,7 +142,7 @@ public void testFailToRename() throws IOException { fos = null; } } finally { - IOUtils.cleanup(null, fos); + IOUtils.cleanupWithLogger(null, fos); FileUtil.setWritable(TEST_DIR, true); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index 2212939d8f64b..1fc54d64ee17e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -95,7 +95,7 @@ public void testHA() throws IOException { final Path dir2 = new Path("/test2"); Assert.assertTrue(fs.mkdirs(dir2)); } finally { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } @@ -130,7 +130,7 @@ public void testSecureHAToken() throws IOException, InterruptedException { verify(fs).renewDelegationToken(token); verify(fs).cancelDelegationToken(token); } finally { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } @@ -199,7 +199,7 @@ public void testClientFailoverWhenStandbyNNHasStaleCredentials() Exception unwrapped = re.unwrapRemoteException(StandbyException.class); Assert.assertTrue(unwrapped instanceof StandbyException); } finally { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } @@ -238,7 +238,7 @@ public void testFailoverAfterOpen() throws IOException { IOUtils.readFully(in, buf, 0, buf.length); Assert.assertArrayEquals(data, buf); } finally { - 
IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } @@ -264,7 +264,7 @@ public void testMultipleNamespacesConfigured() throws Exception { fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf); Assert.assertEquals(2, fs.getResolvedNNAddr().length); } finally { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } @@ -304,7 +304,7 @@ public void run() { } catch (IOException e) { result = false; } finally { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); } synchronized (TestWebHDFSForHA.this) { resultMap.put("mkdirs", result); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java index d6bbc25fdc7a8..c80a79bbb88bf 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java @@ -83,7 +83,7 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { - IOUtils.cleanup(null, dfs); + IOUtils.cleanupWithLogger(null, dfs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSyncReverseBase.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSyncReverseBase.java index cca1c5381c48a..50f5823656e37 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSyncReverseBase.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSyncReverseBase.java @@ -153,7 +153,7 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { - IOUtils.cleanup(null, dfs); + IOUtils.cleanupWithLogger(null, dfs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java index 49613ba57008a..38b79338312ed 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java @@ -96,7 +96,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java index e0e103bfe8019..b16f6b233405a 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java @@ -81,7 +81,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java index 77d428b7d1031..36c6e6a5655c9 100644 --- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java @@ -114,7 +114,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() { - IOUtils.cleanup(null, fs); + IOUtils.cleanupWithLogger(null, fs); if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java index 1c92a9c5ef276..389fe367b04b7 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java @@ -72,7 +72,7 @@ private static void mkdirs(String path) throws Exception { recordInExpectedValues(path); } finally { - IOUtils.cleanup(null, fileSystem); + IOUtils.cleanupWithLogger(null, fileSystem); } } @@ -85,7 +85,7 @@ private static void touchFile(String path) throws Exception { recordInExpectedValues(path); } finally { - IOUtils.cleanup(null, fileSystem, outputStream); + IOUtils.cleanupWithLogger(null, fileSystem, outputStream); } } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java index bf3165765d9cd..780d82df2bce3 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java @@ -261,7 +261,7 @@ private static void touchFile(String path, boolean createMultipleBlocks, System.out.println(fileStatus.getReplication()); } finally { - IOUtils.cleanup(null, outputStream); + IOUtils.cleanupWithLogger(null, outputStream); } } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java index 5315137fde569..622e3916799eb 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java @@ -91,7 +91,7 @@ private static int createFile(String path, int fileSize) throws Exception { return size; } finally { - IOUtils.cleanup(null, fileSystem, outputStream); + IOUtils.cleanupWithLogger(null, fileSystem, outputStream); } } diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java index 61a1745975239..6a310ca2483cd 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java @@ -100,7 +100,7 @@ private static void createFile(String path) throws Exception { new Path(path))[0].getPath().toString()); } finally { - IOUtils.cleanup(null, fileSystem, outputStream); + IOUtils.cleanupWithLogger(null, fileSystem, outputStream); } } diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java 
b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java index 2cb806e6a4bfc..f95c4b36a5cc1 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReadRecordFactory.java @@ -79,7 +79,7 @@ public float getProgress() throws IOException { @Override public void close() throws IOException { - IOUtils.cleanup(null, src); + IOUtils.cleanupWithLogger(null, src); factory.close(); } } diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java index bdbfc3beec7e3..fe3b5d36d9841 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java @@ -112,7 +112,7 @@ public void run() { } catch (InterruptedException e) { // exit thread; ignore any jobs remaining in the trace } finally { - IOUtils.cleanup(null, jobProducer); + IOUtils.cleanupWithLogger(null, jobProducer); } } } diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java index e8c7d61a64d4c..cb05ab63f1c07 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java @@ -143,7 +143,7 @@ public void run() { } catch (InterruptedException e) { return; } finally { - IOUtils.cleanup(null, jobProducer); + IOUtils.cleanupWithLogger(null, jobProducer); } } diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java index bd15c2999e59f..4e7fc9c2bbd80 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java @@ -247,7 +247,7 @@ public void run() { LOG.error("[STRESS] Interrupted in the main block!", e); return; } finally { - IOUtils.cleanup(null, jobProducer); + IOUtils.cleanupWithLogger(null, jobProducer); } } } diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Folder.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Folder.java index 424405aa7e647..683bcc80f1fa9 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Folder.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Folder.java @@ -470,7 +470,7 @@ public int compare(Pair p1, next = heap.poll(); } } finally { - IOUtils.cleanup(null, reader); + IOUtils.cleanupWithLogger(null, reader); if (outGen != null) { outGen.close(); } From 1ba4fa546581e519e2c79a3b15f76cb6bf1ad15d Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 6 Apr 2021 14:52:54 +0530 Subject: [PATCH 201/666] HDFS-15949. 
Fix integer overflow (#2857)

Reviewed-by: Inigo Goiri
Signed-off-by: Akira Ajisaka
---
 .../src/main/native/libhdfspp/tests/hdfs_ext_test.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
index e69ddb26af4fa..fb55172633af0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
@@ -453,11 +453,11 @@ TEST_F(HdfsExtTest, TestHosts) {
   EXPECT_EQ(0, errno);

   //Test invalid arguments
-  EXPECT_EQ(nullptr, hdfsGetHosts(fs, filename.c_str(), 0, std::numeric_limits::max()+1));
+  EXPECT_EQ(nullptr, hdfsGetHosts(fs, filename.c_str(), 0, std::numeric_limits::min()));
   EXPECT_EQ((int) std::errc::invalid_argument, errno);

   //Test invalid arguments
-  EXPECT_EQ(nullptr, hdfsGetHosts(fs, filename.c_str(), std::numeric_limits::max()+1, std::numeric_limits::max()));
+  EXPECT_EQ(nullptr, hdfsGetHosts(fs, filename.c_str(), std::numeric_limits::min(), std::numeric_limits::max()));
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
 }

From 43970122c1f7eb47f1d0b35f25dd07ef235b802f Mon Sep 17 00:00:00 2001
From: "Boyina, Hemanth Kumar"
Date: Tue, 6 Apr 2021 18:06:10 +0530
Subject: [PATCH 202/666] HADOOP-17588. CryptoInputStream#close() should be
 synchronized. Contributed by RenukaPrasad C

---
 .../main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index 0c156e3548d21..3f8dc54bd4a32 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -317,7 +317,7 @@ private void resetStreamOffset(long offset) throws IOException {
   }

   @Override
-  public void close() throws IOException {
+  public synchronized void close() throws IOException {
     if (closed) {
       return;
     }

From 5ea201d0f11d994b89932d8bff4b3e15aa7e29b5 Mon Sep 17 00:00:00 2001
From: He Xiaoqiao
Date: Tue, 6 Apr 2021 21:22:48 +0800
Subject: [PATCH 203/666] HDFS-15942. Increase Quota initialization threads.
 Contributed by Stephen O'Donnell.

--- .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +- .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index ac2896871e066..51fc564a97584 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -374,7 +374,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; public static final String DFS_NAMENODE_QUOTA_INIT_THREADS_KEY = "dfs.namenode.quota.init-threads"; - public static final int DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT = 4; + public static final int DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT = 12; public static final String DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD = "dfs.namenode.edit.log.autoroll.multiplier.threshold"; public static final float diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index c2ae4bc829109..a30889bdd0042 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3735,11 +3735,11 @@ dfs.namenode.quota.init-threads - 4 + 12 The number of concurrent threads to be used in quota initialization. The speed of quota initialization also affects the namenode fail-over latency. - If the size of name space is big, try increasing this. + If the size of name space is big, try increasing this to 16 or higher. From cca7ce6e3e2287e3afafa876daa45c49586dcf49 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 6 Apr 2021 21:53:21 +0530 Subject: [PATCH 204/666] HDFS-15909. 
Make fnmatch cross platform (#2792) --- .../native/libhdfspp/lib/fs/CMakeLists.txt | 5 +- .../native/libhdfspp/lib/fs/filesystem.cc | 12 ++--- .../native/libhdfspp/lib/x-platform/syscall.h | 11 +++++ .../libhdfspp/lib/x-platform/syscall_linux.cc | 6 +++ .../lib/x-platform/syscall_windows.cc | 9 ++++ .../libhdfspp/tests/x-platform/CMakeLists.txt | 7 +++ .../tests/x-platform/syscall_common_test.cc | 47 +++++++++++++++++++ .../tests/x-platform/syscall_nix_test.cc | 35 ++++++++++++++ .../tests/x-platform/syscall_win_test.cc | 35 ++++++++++++++ 9 files changed, 159 insertions(+), 8 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_nix_test.cc create mode 100644 hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_win_test.cc diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt index 624cda54b1495..d6ea248acd9f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/CMakeLists.txt @@ -16,6 +16,7 @@ # limitations under the License. # -add_library(fs_obj OBJECT filesystem.cc filesystem_sync.cc filehandle.cc bad_datanode_tracker.cc namenode_operations.cc) +add_library(fs_obj OBJECT $ filesystem.cc filesystem_sync.cc filehandle.cc bad_datanode_tracker.cc namenode_operations.cc) +target_include_directories(fs_obj PRIVATE ../lib) add_dependencies(fs_obj proto) -add_library(fs $) +add_library(fs $ $) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc index ba75e86eec78d..741d6c783b62e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc @@ -25,12 +25,12 @@ #include #include #include -#include #include -#include #include +#include "x-platform/syscall.h" + #define FMT_THIS_ADDR "this=" << (void*)this namespace hdfs { @@ -722,8 +722,8 @@ void FileSystemImpl::FindShim(const Status &stat, const std::vector & for (StatInfo const& si : stat_infos) { //If we are at the last depth and it matches both path and name, we need to output it. 
if (operational_state->depth == shared_state->dirs.size() - 2 - && !fnmatch(shared_state->dirs[operational_state->depth + 1].c_str(), si.path.c_str(), 0) - && !fnmatch(shared_state->name.c_str(), si.path.c_str(), 0)) { + && XPlatform::Syscall::FnMatch(shared_state->dirs[operational_state->depth + 1], si.path) + && XPlatform::Syscall::FnMatch(shared_state->name, si.path)) { outputs.push_back(si); } //Skip if not directory @@ -731,7 +731,7 @@ void FileSystemImpl::FindShim(const Status &stat, const std::vector & continue; } //Checking for a match with the path at the current depth - if(!fnmatch(shared_state->dirs[operational_state->depth + 1].c_str(), si.path.c_str(), 0)){ + if(XPlatform::Syscall::FnMatch(shared_state->dirs[operational_state->depth + 1], si.path)) { //Launch a new requests for every matched directory shared_state->outstanding_requests++; auto callback = [this, si, operational_state, shared_state](const Status &stat, const std::vector & stat_infos, bool has_more) { @@ -755,7 +755,7 @@ void FileSystemImpl::FindShim(const Status &stat, const std::vector & nn_.GetListing(si.full_path, callback); } //All names that match the specified name are saved to outputs - if(!fnmatch(shared_state->name.c_str(), si.path.c_str(), 0)){ + if(XPlatform::Syscall::FnMatch(shared_state->name, si.path)) { outputs.push_back(si); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h index d162f6ff5137b..297acebfc5c8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h @@ -48,6 +48,17 @@ class Syscall { */ static int WriteToStdout(const char* message); + /** + * Checks whether the {@link str} argument matches the {@link pattern} + * argument, which is a shell wildcard pattern. + * + * @param pattern The wildcard pattern to use. + * @param str The string to match. + * @returns A boolean indicating whether the given {@link str} + * matches {@link pattern}. + */ + static bool FnMatch(const std::string& pattern, const std::string& str); + private: static bool WriteToStdoutImpl(const char* message); }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc index e556d9976e8ad..2c51dbfddfca6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc @@ -16,6 +16,7 @@ * limitations under the License. */ +#include #include #include @@ -30,6 +31,11 @@ int XPlatform::Syscall::WriteToStdout(const char* message) { return WriteToStdoutImpl(message) ? 
1 : 0; } +bool XPlatform::Syscall::FnMatch(const std::string& pattern, + const std::string& str) { + return fnmatch(pattern.c_str(), str.c_str(), 0) == 0; +} + bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { const auto message_len = strlen(message); const auto result = write(1, message, message_len); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc index 06b0031d88033..dc9ba63634f1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc @@ -16,10 +16,13 @@ * limitations under the License. */ +#include #include #include "syscall.h" +#pragma comment(lib, "Shlwapi.lib") + bool XPlatform::Syscall::WriteToStdout(const std::string& message) { return WriteToStdoutImpl(message.c_str()); } @@ -28,6 +31,12 @@ int XPlatform::Syscall::WriteToStdout(const char* message) { return WriteToStdoutImpl(message) ? 1 : 0; } +bool XPlatform::Syscall::FnMatch(const std::string& pattern, + const std::string& str) { + return PathMatchSpecA(static_cast(str.c_str()), + static_cast(pattern.c_str())) == TRUE; +} + bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { auto* const stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); if (stdout_handle == INVALID_HANDLE_VALUE || stdout_handle == nullptr) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt index ac9f8fbd8cade..6a7d0bec37ed4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/CMakeLists.txt @@ -18,9 +18,16 @@ if(WIN32) add_executable(x_platform_utils_test $ utils_common_test.cc utils_test_main.cc utils_win_test.cc) + add_executable(x_platform_syscall_test $ syscall_common_test.cc utils_test_main.cc syscall_win_test.cc) else(WIN32) add_executable(x_platform_utils_test $ utils_common_test.cc utils_test_main.cc utils_nix_test.cc) + add_executable(x_platform_syscall_test $ syscall_common_test.cc utils_test_main.cc syscall_nix_test.cc) endif(WIN32) + target_include_directories(x_platform_utils_test PRIVATE ${LIBHDFSPP_LIB_DIR}) target_link_libraries(x_platform_utils_test gmock_main) add_test(x_platform_utils_test x_platform_utils_test) + +target_include_directories(x_platform_syscall_test PRIVATE ${LIBHDFSPP_LIB_DIR}) +target_link_libraries(x_platform_syscall_test gmock_main) +add_test(x_platform_syscall_test x_platform_syscall_test) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc new file mode 100644 index 0000000000000..04da29a33fb5c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include "x-platform/syscall.h" + +TEST(XPlatformSyscall, FnMatchBasicAsterisk) { + const std::string pattern("a*.doc"); + const std::string str("abcd.doc"); + EXPECT_TRUE(XPlatform::Syscall::FnMatch(pattern, str)); +} + +TEST(XPlatformSyscall, FnMatchBasicQuestionMark) { + const std::string pattern("a?.doc"); + const std::string str("ab.doc"); + EXPECT_TRUE(XPlatform::Syscall::FnMatch(pattern, str)); +} + +TEST(XPlatformSyscall, FnMatchNegativeAsterisk) { + const std::string pattern("a*.doc"); + const std::string str("bcd.doc"); + EXPECT_FALSE(XPlatform::Syscall::FnMatch(pattern, str)); +} + +TEST(XPlatformSyscall, FnMatchNegativeQuestionMark) { + const std::string pattern("a?.doc"); + const std::string str("abc.doc"); + EXPECT_FALSE(XPlatform::Syscall::FnMatch(pattern, str)); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_nix_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_nix_test.cc new file mode 100644 index 0000000000000..f2c753f080e4f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_nix_test.cc @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include + +#include "x-platform/syscall.h" + +TEST(XPlatformSyscall, FnMatchBasicPath) { + const std::string pattern("*.doc"); + const std::string str("some/path/abcd.doc"); + EXPECT_TRUE(XPlatform::Syscall::FnMatch(pattern, str)); +} + +TEST(XPlatformSyscall, FnMatchNegativePath) { + const std::string pattern("x*.doc"); + const std::string str("y/abcd.doc"); + EXPECT_FALSE(XPlatform::Syscall::FnMatch(pattern, str)); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_win_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_win_test.cc new file mode 100644 index 0000000000000..2d7393f4e0d7f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_win_test.cc @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include "x-platform/syscall.h" + +TEST(XPlatformSyscall, FnMatchBasicPath) { + const std::string pattern("*.doc"); + const std::string str(R"(some\path\abcd.doc)"); + EXPECT_TRUE(XPlatform::Syscall::FnMatch(pattern, str)); +} + +TEST(XPlatformSyscall, FnMatchNegativePath) { + const std::string pattern("x*.doc"); + const std::string str(R"(y\abcd.doc)"); + EXPECT_FALSE(XPlatform::Syscall::FnMatch(pattern, str)); +} From c3de1b4529cfefb237a2f251f0b30081f184c8a3 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Wed, 7 Apr 2021 12:18:23 +0800 Subject: [PATCH 205/666] HADOOP-17613. Log not flushed fully when daemon shutdown. Contributed by Renukaprasad C. 
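For context on the change below: buffered log4j output could be lost because nothing flushed it before the JVM exited, so the patch calls org.apache.log4j.LogManager.shutdown() from the shutdown hook to close and flush all appenders. A minimal standalone sketch of that pattern follows; the class and log messages are hypothetical, and only the LogManager.shutdown() call mirrors the diff.

import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class ShutdownFlushExample {
  private static final Logger LOG = Logger.getLogger(ShutdownFlushExample.class);

  public static void main(String[] args) {
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      LOG.info("SHUTDOWN_MSG: shutting down");
      // Closes every configured appender, flushing buffered records before exit.
      LogManager.shutdown();
    }));
    LOG.info("STARTUP_MSG: daemon started");
  }
}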
--- .../src/main/java/org/apache/hadoop/util/StringUtils.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 9e7b36f71e211..4fe09a9ed7e6a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -41,6 +41,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.net.NetUtils; +import org.apache.log4j.LogManager; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; @@ -752,6 +753,7 @@ static void startupShutdownMessage(Class clazz, String[] args, public void run() { LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ "Shutting down " + classname + " at " + hostname})); + LogManager.shutdown(); } }, SHUTDOWN_HOOK_PRIORITY); From 3646abff66b472f2c2d58e60a4d7ff00907bb446 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 6 Apr 2021 21:52:23 -0700 Subject: [PATCH 206/666] HADOOP-17624. Remove any rocksdb exclusion code. (#2867) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Márton Elek Reviewed-by: Viraj Jasani --- .../test/resources/ensure-jars-have-correct-contents.sh | 7 ------- hadoop-client-modules/hadoop-client-minicluster/pom.xml | 7 ------- 2 files changed, 14 deletions(-) diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh index d77424e6b7899..0dbfefbf4f16d 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh @@ -58,13 +58,6 @@ allowed_expr+="|^org.apache.hadoop.application-classloader.properties$" allowed_expr+="|^java.policy$" # * Used by javax.annotation allowed_expr+="|^jndi.properties$" -# * allowing native libraries from rocksdb. Leaving native libraries as it is. -allowed_expr+="|^librocksdbjni-linux32.so" -allowed_expr+="|^librocksdbjni-linux64.so" -allowed_expr+="|^librocksdbjni-osx.jnilib" -allowed_expr+="|^librocksdbjni-win64.dll" -allowed_expr+="|^librocksdbjni-linux-ppc64le.so" - allowed_expr+=")" declare -i bad_artifacts=0 diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 48dc65dc7cb04..a35d832a76c04 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -760,13 +760,6 @@ xml.xsd - - - org.rocksdb:rocksdbjni - - HISTORY-JAVA.md - - org.eclipse.jetty:* From 9f77e7d02f427528a2206cba45d4d21e62d11cf4 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 6 Apr 2021 21:55:01 -0700 Subject: [PATCH 207/666] HADOOP-17621. hadoop-auth to remove jetty-server dependency. 
(#2865) Reviewed-by: Akira Ajisaka --- hadoop-common-project/hadoop-auth/pom.xml | 6 +----- .../authentication/server/AuthenticationFilter.java | 8 ++------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 8e8526c7450e0..89d54198feeec 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -193,11 +193,7 @@ guava test - - org.eclipse.jetty - jetty-server - - + diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index 9f40c42d24135..b339a5d5a2450 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -19,7 +19,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.authentication.util.*; -import org.eclipse.jetty.server.Response; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -621,7 +620,7 @@ && getMaxInactiveInterval() > 0) { errCode = HttpServletResponse.SC_FORBIDDEN; } // After Jetty 9.4.21, sendError() no longer allows a custom message. - // use setStatusWithReason() to set a custom message. + // use setStatus() to set a custom message. String reason; if (authenticationEx == null) { reason = "Authentication required"; @@ -629,10 +628,7 @@ && getMaxInactiveInterval() > 0) { reason = authenticationEx.getMessage(); } - if (httpResponse instanceof Response) { - ((Response)httpResponse).setStatusWithReason(errCode, reason); - } - + httpResponse.setStatus(errCode, reason); httpResponse.sendError(errCode, reason); } } From e26883ccde9804b807dea17d9881bc7d8fbbc6c8 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 7 Apr 2021 11:52:21 +0200 Subject: [PATCH 208/666] YARN-10714. Remove dangling dynamic queues on reinitialization. 
Contributed by Andras Gyori --- .../CapacitySchedulerQueueManager.java | 43 ++++++++++--- ...CapacitySchedulerNewQueueAutoCreation.java | 61 +++++++++++++++++++ 2 files changed, 97 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index 00d1cda30886e..5cd14908e8914 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -27,8 +27,6 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import org.apache.hadoop.yarn.util.resource.Resources; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -340,11 +338,14 @@ private void updateQueues(CSQueueStore existingQueues, } for (CSQueue queue : existingQueues.getQueues()) { - if (!((AbstractCSQueue) queue).isDynamicQueue() && newQueues.get( - queue.getQueuePath()) == null && !( - queue instanceof AutoCreatedLeafQueue && conf - .isAutoCreateChildQueueEnabled( - queue.getParent().getQueuePath()))) { + boolean isDanglingDynamicQueue = isDanglingDynamicQueue( + newQueues, existingQueues, queue); + boolean isRemovable = isDanglingDynamicQueue || !isDynamicQueue(queue) + && newQueues.get(queue.getQueuePath()) == null + && !(queue instanceof AutoCreatedLeafQueue && + conf.isAutoCreateChildQueueEnabled(queue.getParent().getQueuePath())); + + if (isRemovable) { existingQueues.remove(queue); } } @@ -435,4 +436,32 @@ private Map> getQueueToLabels() { getQueueStateManager() { return this.queueStateManager; } + + private boolean isDynamicQueue(CSQueue queue) { + return (queue instanceof AbstractCSQueue) && + ((AbstractCSQueue) queue).isDynamicQueue(); + } + + private boolean isDanglingDynamicQueue( + CSQueueStore newQueues, CSQueueStore existingQueues, + CSQueue queue) { + if (!isDynamicQueue(queue)) { + return false; + } + if (queue.getParent() == null) { + return true; + } + if (newQueues.get(queue.getParent().getQueuePath()) != null) { + return false; + } + CSQueue parent = existingQueues.get(queue.getParent().getQueuePath()); + if (parent == null) { + return true; + } + // A dynamic queue is dangling, if its parent is not parsed in newQueues + // or if its parent is not a dynamic queue. Dynamic queues are not parsed in + // newQueues but they are deleted automatically, so it is safe to assume + // that existingQueues contain valid dynamic queues. 
+ return !isDynamicQueue(parent); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index c403d23f07b98..4dae4fd64eb3d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -22,6 +22,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -968,6 +969,66 @@ public void testQueueInfoIfAmbiguousQueueNames() throws Exception { bAutoLeafQueue.getQueueInfo().getQueueName()); } + @Test + public void testRemoveDanglingAutoCreatedQueuesOnReinit() throws Exception { + startScheduler(); + + // Validate static parent deletion + createQueue("root.a.a-auto"); + AbstractCSQueue aAuto = (AbstractCSQueue) cs. + getQueue("root.a.a-auto"); + Assert.assertTrue(aAuto.isDynamicQueue()); + + csConf.setState("root.a", QueueState.STOPPED); + cs.reinitialize(csConf, mockRM.getRMContext()); + aAuto = (AbstractCSQueue) cs. 
+ getQueue("root.a.a-auto"); + Assert.assertEquals("root.a.a-auto is not in STOPPED state", QueueState.STOPPED, aAuto.getState()); + csConf.setQueues("root", new String[]{"b"}); + cs.reinitialize(csConf, mockRM.getRMContext()); + CSQueue aAutoNew = cs.getQueue("root.a.a-auto"); + Assert.assertNull(aAutoNew); + + submitApp(cs, USER0, "a-auto", "root.a"); + aAutoNew = cs.getQueue("root.a.a-auto"); + Assert.assertNotNull(aAutoNew); + + // Validate static grandparent deletion + csConf.setQueues("root", new String[]{"a", "b"}); + csConf.setQueues("root.a", new String[]{"a1"}); + csConf.setAutoQueueCreationV2Enabled("root.a.a1", true); + cs.reinitialize(csConf, mockRM.getRMContext()); + + createQueue("root.a.a1.a1-auto"); + CSQueue a1Auto = cs.getQueue("root.a.a1.a1-auto"); + Assert.assertNotNull("a1-auto should exist", a1Auto); + + csConf.setQueues("root", new String[]{"b"}); + cs.reinitialize(csConf, mockRM.getRMContext()); + a1Auto = cs.getQueue("root.a.a1.a1-auto"); + Assert.assertNull("a1-auto has no parent and should not exist", a1Auto); + + // Validate dynamic parent deletion + csConf.setState("root.b", QueueState.STOPPED); + cs.reinitialize(csConf, mockRM.getRMContext()); + csConf.setAutoQueueCreationV2Enabled("root.b", true); + cs.reinitialize(csConf, mockRM.getRMContext()); + + createQueue("root.b.b-auto-parent.b-auto-leaf"); + CSQueue bAutoParent = cs.getQueue("root.b.b-auto-parent"); + Assert.assertNotNull("b-auto-parent should exist", bAutoParent); + ParentQueue b = (ParentQueue) cs.getQueue("root.b"); + b.removeChildQueue(bAutoParent); + + cs.reinitialize(csConf, mockRM.getRMContext()); + + bAutoParent = cs.getQueue("root.b.b-auto-parent"); + Assert.assertNull("b-auto-parent should not exist ", bAutoParent); + CSQueue bAutoLeaf = cs.getQueue("root.b.b-auto-parent.b-auto-leaf"); + Assert.assertNull("b-auto-leaf should not exist " + + "when its dynamic parent is removed", bAutoLeaf); + } + protected LeafQueue createQueue(String queuePath) throws YarnException { return autoQueueHandler.autoCreateQueue( CSQueueUtils.extractQueuePath(queuePath)); From d45d5373db87fdea4bd2e2c27a1f56f71b2de76b Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Wed, 7 Apr 2021 15:36:25 +0530 Subject: [PATCH 209/666] HDFS-15948. Fix test4tests for libhdfspp (#2873) Signed-off-by: Akira Ajisaka --- dev-support/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index d2266c5530bc2..f5305429264cd 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -35,7 +35,7 @@ pipeline { DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" YETUS='yetus' // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='11eb9b09786e401fbdeaa3be83a19a4066fd7813' + YETUS_VERSION='f9ba0170a5787a5f4662d3769804fef0226a182f' } parameters { From c4f56ea36004b005052b7e90fc174ed1130d041b Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Wed, 7 Apr 2021 23:49:17 +0530 Subject: [PATCH 210/666] HADOOP-17617. Incorrect representation of RESPONSE for Get Key Version in KMS index.md.vm file. 
Contributed by Ravuri Sushma sree --- .../hadoop-kms/src/site/markdown/index.md.vm | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm index 95e926b3561a1..d7599de556900 100644 --- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm +++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm @@ -1055,7 +1055,8 @@ $H4 Get Key Version Content-Type: application/json { - "name" : "versionName", + "name" : "", + "versionName" : "", "material" : "", //base64 } @@ -1072,11 +1073,13 @@ $H4 Get Key Versions [ { - "name" : "versionName", + "name" : "", + "versionName" : "", "material" : "", //base64 }, { - "name" : "versionName", + "name" : "", + "versionName" : "", "material" : "", //base64 }, ... From 1eb94a57edb671df8ce2f39cfda3906ac89cd235 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 7 Apr 2021 23:25:11 -0700 Subject: [PATCH 211/666] HADOOP-17625. Update to Jetty 9.4.39. (#2870) Reviewed-by: cxorm --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 0c73a74b14a9f..aeee30ab9433d 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -37,7 +37,7 @@ true true - 9.4.35.v20201120 + 9.4.39.v20210325 _ _ From e39f8d68a42af4076e0fa6deddcc5795813465f7 Mon Sep 17 00:00:00 2001 From: Sungpeo Kook Date: Thu, 8 Apr 2021 15:58:10 +0900 Subject: [PATCH 212/666] MAPREDUCE-7270. TestHistoryViewerPrinter could be failed when the locale isn't English. (#1942) Contributed by Sungpeo Kook. Signed-off-by: Mingliang Liu --- .../jobhistory/TestHistoryViewerPrinter.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java index f0f713a98211d..cb508f6d29b9d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java @@ -24,7 +24,9 @@ import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.TaskType; +import org.junit.AfterClass; import org.junit.Assert; +import org.junit.BeforeClass; import org.junit.Test; import org.skyscreamer.jsonassert.JSONAssert; import org.skyscreamer.jsonassert.JSONCompareMode; @@ -35,6 +37,7 @@ import java.io.PrintStream; import java.util.HashMap; import java.util.TimeZone; +import java.util.Locale; public class TestHistoryViewerPrinter { @@ -43,6 +46,18 @@ public class TestHistoryViewerPrinter { private final String LINE_SEPARATOR = System.lineSeparator(); + private static final Locale DEFAULT_LOCALE = Locale.getDefault(); + + @BeforeClass + public static void setUp() throws Exception { + Locale.setDefault(Locale.ENGLISH); + } + + @AfterClass + public static void tearDown() throws Exception { + Locale.setDefault(DEFAULT_LOCALE); + } + @Test public void testHumanPrinter() throws Exception { JobHistoryParser.JobInfo job = createJobInfo(); 
From 105e3b53676b2d3e42c77b2bca3fbc6a390fb4d6 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 8 Apr 2021 12:42:48 +0200 Subject: [PATCH 213/666] YARN-10564. Support Auto Queue Creation template configurations. Contributed by Andras Gyori. --- .../scheduler/capacity/AbstractCSQueue.java | 35 ++--- .../capacity/AutoCreatedQueueTemplate.java | 133 ++++++++++++++++++ .../CapacitySchedulerConfiguration.java | 2 +- .../scheduler/capacity/ParentQueue.java | 27 +++- .../TestAutoCreatedQueueTemplate.java | 116 +++++++++++++++ ...CapacitySchedulerNewQueueAutoCreation.java | 80 +++++++++++ 6 files changed, 375 insertions(+), 18 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 06575be4c7a3e..250fcc716d645 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -18,25 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueACL; @@ -58,13 +48,13 @@ import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; -import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AbsoluteResourceType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AbsoluteResourceType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer; @@ -74,8 +64,17 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.locks.ReentrantReadWriteLock; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.UNDEFINED; @@ -361,6 +360,10 @@ protected void setupQueueConfigs(Resource clusterResource, writeLock.lock(); try { + if (isDynamicQueue() && getParent() instanceof ParentQueue) { + ((ParentQueue) getParent()).getAutoCreatedQueueTemplate() + .setTemplateEntriesForChild(configuration, getQueuePath()); + } // get labels this.accessibleLabels = configuration.getAccessibleNodeLabels(getQueuePath()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java new file mode 100644 index 0000000000000..6c516c04770b5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AUTO_QUEUE_CREATION_V2_PREFIX; + +/** + * A handler for storing and setting auto created queue template settings. + */ +public class AutoCreatedQueueTemplate { + public static final String AUTO_QUEUE_TEMPLATE_PREFIX = + AUTO_QUEUE_CREATION_V2_PREFIX + "template."; + private static final String WILDCARD_QUEUE = "*"; + private static final int MAX_WILDCARD_LEVEL = 1; + + private final Map templateProperties = new HashMap<>(); + + public AutoCreatedQueueTemplate(Configuration configuration, + String queuePath) { + setTemplateConfigEntries(configuration, queuePath); + } + + @VisibleForTesting + public static String getAutoQueueTemplatePrefix(String queue) { + return CapacitySchedulerConfiguration.getQueuePrefix(queue) + + AUTO_QUEUE_TEMPLATE_PREFIX; + } + + /** + * Get the template properties attached to a parent queue. + * @return template property names and values + */ + public Map getTemplateProperties() { + return templateProperties; + } + + /** + * Sets the configuration properties of a child queue based on its parent + * template settings. + * @param conf configuration to set + * @param childQueuePath child queue path used for prefixing the properties + */ + public void setTemplateEntriesForChild(Configuration conf, + String childQueuePath) { + // Get all properties that are explicitly set + Set alreadySetProps = conf.getPropsWithPrefix( + CapacitySchedulerConfiguration.getQueuePrefix(childQueuePath)).keySet(); + + for (Map.Entry entry : templateProperties.entrySet()) { + // Do not overwrite explicitly configured properties + if (alreadySetProps.contains(entry.getKey())) { + continue; + } + conf.set(CapacitySchedulerConfiguration.getQueuePrefix( + childQueuePath) + entry.getKey(), entry.getValue()); + } + } + + /** + * Store the template configuration properties. Explicit templates always take + * precedence over wildcard values. 
An example template precedence + * hierarchy for root.a ParentQueue from highest to lowest: + * yarn.scheduler.capacity.root.a.auto-queue-creation-v2.template.capacity + * yarn.scheduler.capacity.root.*.auto-queue-creation-v2.template.capacity + */ + private void setTemplateConfigEntries(Configuration configuration, + String queuePath) { + List queuePathParts = new ArrayList<>(Arrays.asList( + queuePath.split("\\."))); + + if (queuePathParts.size() <= 1) { + // This is either root or an empty queue name + return; + } + int queuePathMaxIndex = queuePathParts.size() - 1; + + // start with the most explicit format (without wildcard) + int wildcardLevel = 0; + // root can not be wildcarded + // MAX_WILDCARD_LEVEL will be configurable in the future + int supportedWildcardLevel = Math.min(queuePathMaxIndex - 1, + MAX_WILDCARD_LEVEL); + + + // Collect all template entries + while (wildcardLevel <= supportedWildcardLevel) { + // Get all config entries with the specified prefix + String templateQueuePath = String.join(".", queuePathParts); + // Get all configuration entries with + // .auto-queue-creation-v2.template prefix + Map props = configuration.getPropsWithPrefix( + getAutoQueueTemplatePrefix(templateQueuePath)); + + for (Map.Entry entry : props.entrySet()) { + // If an entry is already present, it had a higher precedence + templateProperties.putIfAbsent(entry.getKey(), entry.getValue()); + } + + // Replace a queue part with a wildcard based on the wildcard level + // eg. root.a -> root.* + int queuePartToWildcard = queuePathMaxIndex - wildcardLevel; + queuePathParts.set(queuePartToWildcard, WILDCARD_QUEUE); + + ++wildcardLevel; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 5bae2b375f1a8..266cbb4d34601 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -2029,7 +2029,7 @@ public void setDefaultLifetimePerQueue(String queue, long defaultLifetime) { AUTO_CREATE_CHILD_QUEUE_PREFIX + "enabled"; @Private - private static final String AUTO_QUEUE_CREATION_V2_PREFIX = + protected static final String AUTO_QUEUE_CREATION_V2_PREFIX = "auto-queue-creation-v2."; @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 3d28933141359..798c71037845d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -30,6 +30,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; import org.slf4j.Logger; @@ -101,6 +102,8 @@ public class ParentQueue extends AbstractCSQueue { private final boolean allowZeroCapacitySum; + private AutoCreatedQueueTemplate autoCreatedQueueTemplate; + // effective min ratio per resource, it is used during updateClusterResource, // leaf queue can use this to calculate effective resources. // This field will not be edited, reference will point to a new immutable map @@ -152,6 +155,8 @@ protected void setupQueueConfigs(Resource clusterResource, throws IOException { writeLock.lock(); try { + autoCreatedQueueTemplate = new AutoCreatedQueueTemplate( + csConf, getQueuePath()); super.setupQueueConfigs(clusterResource, csConf); StringBuilder aclsString = new StringBuilder(); for (Map.Entry e : acls.entrySet()) { @@ -477,6 +482,8 @@ private CapacitySchedulerConfiguration getConfForAutoCreatedQueue( CapacitySchedulerConfiguration dupCSConfig = new CapacitySchedulerConfiguration( csContext.getConfiguration(), false); + autoCreatedQueueTemplate.setTemplateEntriesForChild(dupCSConfig, + childQueuePath); if (isLeaf) { // set to -1, to disable it dupCSConfig.setUserLimitFactor(childQueuePath, -1); @@ -647,6 +654,18 @@ public void reinitialize(CSQueue newlyParsedQueue, Map currentChildQueues = getQueuesMap(childQueues); Map newChildQueues = getQueuesMap( newlyParsedParentQueue.childQueues); + + // Reinitialize dynamic queues as well, because they are not parsed + for (String queue : Sets.difference(currentChildQueues.keySet(), + newChildQueues.keySet())) { + CSQueue candidate = currentChildQueues.get(queue); + if (candidate instanceof AbstractCSQueue) { + if (((AbstractCSQueue) candidate).isDynamicQueue()) { + candidate.reinitialize(candidate, clusterResource); + } + } + } + for (Map.Entry e : newChildQueues.entrySet()) { String newChildQueueName = e.getKey(); CSQueue newChildQueue = e.getValue(); @@ -1217,7 +1236,9 @@ public void updateClusterResource(Resource clusterResource, // For dynamic queue, we will set weight to 1 every time, because it // is possible new labels added to the parent. if (((AbstractCSQueue) queue).isDynamicQueue()) { - queue.getQueueCapacities().setWeight(nodeLabel, 1f); + if (queue.getQueueCapacities().getWeight(nodeLabel) == -1f) { + queue.getQueueCapacities().setWeight(nodeLabel, 1f); + } } } } @@ -1637,4 +1658,8 @@ public boolean isEligibleForAutoDeletion() { csContext.getConfiguration(). 
isAutoExpiredDeletionEnabled(this.getQueuePath()); } + + public AutoCreatedQueueTemplate getAutoCreatedQueueTemplate() { + return autoCreatedQueueTemplate; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java new file mode 100644 index 0000000000000..1c021f6efb04d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestAutoCreatedQueueTemplate { + private static final String TEST_QUEUE_ABC = "root.a.b.c"; + private static final String TEST_QUEUE_AB = "root.a.b"; + private static final String TEST_QUEUE_A = "root.a"; + private static final String ROOT = "root"; + private CapacitySchedulerConfiguration conf; + + @Before + public void setUp() throws Exception { + conf = new CapacitySchedulerConfiguration(); + conf.setQueues("root", new String[]{"a"}); + conf.setQueues("a", new String[]{"b"}); + conf.setQueues("b", new String[]{"c"}); + + } + + @Test + public void testNonWildCardTemplate() { + conf.set(getTemplateKey(TEST_QUEUE_AB, "capacity"), "6w"); + AutoCreatedQueueTemplate template = + new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB); + template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC); + + Assert.assertEquals("weight is not set", 6f, + conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6); + + } + + @Test + public void testOneLevelWildcardTemplate() { + conf.set(getTemplateKey("root.a.*", "capacity"), "6w"); + AutoCreatedQueueTemplate template = + new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB); + template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC); + + Assert.assertEquals("weight is not set", 6f, + conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6); + + } + + @Test + public void testIgnoredWhenRootWildcarded() { + conf.set(getTemplateKey("*", "capacity"), "6w"); + AutoCreatedQueueTemplate template = + new AutoCreatedQueueTemplate(conf, ROOT); + template.setTemplateEntriesForChild(conf, TEST_QUEUE_A); + + Assert.assertEquals("weight is set", -1f, + conf.getNonLabeledQueueWeight(TEST_QUEUE_A), 10e-6); + } + + @Test + public void testIgnoredWhenNoParent() { + conf.set(getTemplateKey("root", "capacity"), "6w"); + AutoCreatedQueueTemplate template = + new AutoCreatedQueueTemplate(conf, ROOT); + template.setTemplateEntriesForChild(conf, ROOT); + + Assert.assertEquals("weight is set", -1f, + conf.getNonLabeledQueueWeight(ROOT), 10e-6); + } + + @Test + public void testTemplatePrecedence() { + conf.set(getTemplateKey("root.a.b", "capacity"), "6w"); + conf.set(getTemplateKey("root.a.*", "capacity"), "4w"); + conf.set(getTemplateKey("root.*.*", "capacity"), "2w"); + + AutoCreatedQueueTemplate template = + new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB); + template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC); + + Assert.assertEquals( + "explicit template does not have the highest precedence", 6f, + conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6); + + CapacitySchedulerConfiguration newConf = + new CapacitySchedulerConfiguration(); + newConf.set(getTemplateKey("root.a.*", "capacity"), "4w"); + template = + new AutoCreatedQueueTemplate(newConf, TEST_QUEUE_AB); + template.setTemplateEntriesForChild(newConf, TEST_QUEUE_ABC); + + Assert.assertEquals("precedence is invalid", 4f, + newConf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6); + } + + private String getTemplateKey(String queuePath, String entryKey) { + return CapacitySchedulerConfiguration.getQueuePrefix(queuePath) + + 
AutoCreatedQueueTemplate.AUTO_QUEUE_TEMPLATE_PREFIX + entryKey; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index 4dae4fd64eb3d..c514fc7af2a04 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -647,6 +648,85 @@ public void testAutoCreateQueueMaxQueuesLimit() throws Exception { } } + @Test + public void testAutoCreatedQueueTemplateConfig() throws Exception { + startScheduler(); + csConf.set(AutoCreatedQueueTemplate.getAutoQueueTemplatePrefix( + "root.a.*") + "capacity", "6w"); + cs.reinitialize(csConf, mockRM.getRMContext()); + + LeafQueue a2 = createQueue("root.a.a-auto.a2"); + Assert.assertEquals("weight is not set by template", 6f, + a2.getQueueCapacities().getWeight(), 1e-6); + + cs.reinitialize(csConf, mockRM.getRMContext()); + a2 = (LeafQueue) cs.getQueue("root.a.a-auto.a2"); + Assert.assertEquals("weight is overridden", 6f, + a2.getQueueCapacities().getWeight(), 1e-6); + + csConf.setNonLabeledQueueWeight("root.a.a-auto.a2", 4f); + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals("weight is not explicitly set", 4f, + a2.getQueueCapacities().getWeight(), 1e-6); + } + + @Test + public void testAutoCreatedQueueConfigChange() throws Exception { + startScheduler(); + LeafQueue a2 = createQueue("root.a.a-auto.a2"); + csConf.setNonLabeledQueueWeight("root.a.a-auto.a2", 4f); + cs.reinitialize(csConf, mockRM.getRMContext()); + + Assert.assertEquals("weight is not explicitly set", 4f, + a2.getQueueCapacities().getWeight(), 1e-6); + + a2 = (LeafQueue) cs.getQueue("root.a.a-auto.a2"); + csConf.setState("root.a.a-auto.a2", QueueState.STOPPED); + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals("root.a.a-auto.a2 has not been stopped", + QueueState.STOPPED, a2.getState()); + + csConf.setState("root.a.a-auto.a2", QueueState.RUNNING); + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals("root.a.a-auto.a2 is not running", + QueueState.RUNNING, a2.getState()); + } + + @Test + public void testAutoCreateQueueState() throws Exception { + startScheduler(); + + createQueue("root.e.e1"); + csConf.setState("root.e", QueueState.STOPPED); + csConf.setState("root.e.e1", QueueState.STOPPED); + csConf.setState("root.a", QueueState.STOPPED); + cs.reinitialize(csConf, mockRM.getRMContext()); + + // Make sure the static queue is stopped + Assert.assertEquals(cs.getQueue("root.a").getState(), + QueueState.STOPPED); + // If not set, default is the queue state 
of parent + Assert.assertEquals(cs.getQueue("root.a.a1").getState(), + QueueState.STOPPED); + + Assert.assertEquals(cs.getQueue("root.e").getState(), + QueueState.STOPPED); + Assert.assertEquals(cs.getQueue("root.e.e1").getState(), + QueueState.STOPPED); + + // Make root.e state to RUNNING + csConf.setState("root.e", QueueState.RUNNING); + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals(cs.getQueue("root.e.e1").getState(), + QueueState.STOPPED); + + // Make root.e.e1 state to RUNNING + csConf.setState("root.e.e1", QueueState.RUNNING); + cs.reinitialize(csConf, mockRM.getRMContext()); + Assert.assertEquals(cs.getQueue("root.e.e1").getState(), + QueueState.RUNNING); + } + @Test public void testAutoQueueCreationDepthLimitFromStaticParent() throws Exception { From bb7dfcb1652c9c5a77fcfe739de2af86c1657fc8 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Thu, 8 Apr 2021 11:59:02 +0100 Subject: [PATCH 214/666] HDFS-15937. Reduce memory used during datanode layout upgrade. Contributed by Stephen O'Donnell (#2838) --- .../hdfs/server/datanode/DataStorage.java | 98 ++++++++++++------- 1 file changed, 62 insertions(+), 36 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 4e216db892f67..03e99864a07f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -1071,12 +1071,26 @@ private static void linkAllBlocks(File fromDir, File fromBbwDir, File toDir, } private static class LinkArgs { - File src; - File dst; + private File srcDir; + private File dstDir; + private String blockFile; + + LinkArgs(File srcDir, File dstDir, String blockFile) { + this.srcDir = srcDir; + this.dstDir = dstDir; + this.blockFile = blockFile; + } + + public File src() { + return new File(srcDir, blockFile); + } - LinkArgs(File src, File dst) { - this.src = src; - this.dst = dst; + public File dst() { + return new File(dstDir, blockFile); + } + + public String blockFile() { + return blockFile; } } @@ -1102,8 +1116,9 @@ private static void linkBlocks(File from, File to, int oldLV, } final ArrayList idBasedLayoutSingleLinks = Lists.newArrayList(); - linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to, - idBasedLayoutSingleLinks); + final Map pathCache = new HashMap<>(); + linkBlocksHelper(from, to, hl, upgradeToIdBasedLayout, to, + idBasedLayoutSingleLinks, pathCache); // Detect and remove duplicate entries. final ArrayList duplicates = @@ -1129,7 +1144,7 @@ public Void call() throws IOException { idBasedLayoutSingleLinks.size()); for (int j = iCopy; j < upperBound; j++) { LinkArgs cur = idBasedLayoutSingleLinks.get(j); - HardLink.createHardLink(cur.src, cur.dst); + HardLink.createHardLink(cur.src(), cur.dst()); } return null; } @@ -1162,9 +1177,9 @@ static ArrayList findDuplicateEntries(ArrayList all) { @Override public int compare(LinkArgs a, LinkArgs b) { return ComparisonChain.start(). - compare(a.src.getName(), b.src.getName()). - compare(a.src, b.src). - compare(a.dst, b.dst). + compare(a.blockFile(), b.blockFile()). + compare(a.src(), b.src()). + compare(a.dst(), b.dst()). 
result(); } }); @@ -1174,8 +1189,8 @@ public int compare(LinkArgs a, LinkArgs b) { boolean addedPrev = false; for (int i = 0; i < all.size(); i++) { LinkArgs args = all.get(i); - long blockId = Block.getBlockId(args.src.getName()); - boolean isMeta = Block.isMetaFilename(args.src.getName()); + long blockId = Block.getBlockId(args.blockFile()); + boolean isMeta = Block.isMetaFilename(args.blockFile()); if ((prevBlockId == null) || (prevBlockId.longValue() != blockId)) { prevBlockId = blockId; @@ -1214,10 +1229,10 @@ private static void removeDuplicateEntries(ArrayList all, TreeMap> highestGenstamps = new TreeMap>(); for (LinkArgs duplicate : duplicates) { - if (!Block.isMetaFilename(duplicate.src.getName())) { + if (!Block.isMetaFilename(duplicate.blockFile())) { continue; } - long blockId = Block.getBlockId(duplicate.src.getName()); + long blockId = Block.getBlockId(duplicate.blockFile()); List prevHighest = highestGenstamps.get(blockId); if (prevHighest == null) { List highest = new LinkedList(); @@ -1226,8 +1241,8 @@ private static void removeDuplicateEntries(ArrayList all, continue; } long prevGenstamp = - Block.getGenerationStamp(prevHighest.get(0).src.getName()); - long genstamp = Block.getGenerationStamp(duplicate.src.getName()); + Block.getGenerationStamp(prevHighest.get(0).blockFile()); + long genstamp = Block.getGenerationStamp(duplicate.blockFile()); if (genstamp < prevGenstamp) { continue; } @@ -1241,19 +1256,19 @@ private static void removeDuplicateEntries(ArrayList all, // from the duplicates list. for (Iterator iter = duplicates.iterator(); iter.hasNext(); ) { LinkArgs duplicate = iter.next(); - long blockId = Block.getBlockId(duplicate.src.getName()); + long blockId = Block.getBlockId(duplicate.blockFile()); List highest = highestGenstamps.get(blockId); if (highest != null) { boolean found = false; for (LinkArgs high : highest) { - if (high.src.getParent().equals(duplicate.src.getParent())) { + if (high.src().getParent().equals(duplicate.src().getParent())) { found = true; break; } } if (!found) { LOG.warn("Unexpectedly low genstamp on {}.", - duplicate.src.getAbsolutePath()); + duplicate.src().getAbsolutePath()); iter.remove(); } } @@ -1264,25 +1279,25 @@ private static void removeDuplicateEntries(ArrayList all, // preserving one block file / metadata file pair. TreeMap longestBlockFiles = new TreeMap(); for (LinkArgs duplicate : duplicates) { - if (Block.isMetaFilename(duplicate.src.getName())) { + if (Block.isMetaFilename(duplicate.blockFile())) { continue; } - long blockId = Block.getBlockId(duplicate.src.getName()); + long blockId = Block.getBlockId(duplicate.blockFile()); LinkArgs prevLongest = longestBlockFiles.get(blockId); if (prevLongest == null) { longestBlockFiles.put(blockId, duplicate); continue; } - long blockLength = duplicate.src.length(); - long prevBlockLength = prevLongest.src.length(); + long blockLength = duplicate.src().length(); + long prevBlockLength = prevLongest.src().length(); if (blockLength < prevBlockLength) { LOG.warn("Unexpectedly short length on {}.", - duplicate.src.getAbsolutePath()); + duplicate.src().getAbsolutePath()); continue; } if (blockLength > prevBlockLength) { LOG.warn("Unexpectedly short length on {}.", - prevLongest.src.getAbsolutePath()); + prevLongest.src().getAbsolutePath()); } longestBlockFiles.put(blockId, duplicate); } @@ -1291,21 +1306,22 @@ private static void removeDuplicateEntries(ArrayList all, // arbitrarily selected by us. 
for (Iterator iter = all.iterator(); iter.hasNext(); ) { LinkArgs args = iter.next(); - long blockId = Block.getBlockId(args.src.getName()); + long blockId = Block.getBlockId(args.blockFile()); LinkArgs bestDuplicate = longestBlockFiles.get(blockId); if (bestDuplicate == null) { continue; // file has no duplicates } - if (!bestDuplicate.src.getParent().equals(args.src.getParent())) { - LOG.warn("Discarding {}.", args.src.getAbsolutePath()); + if (!bestDuplicate.src().getParent().equals(args.src().getParent())) { + LOG.warn("Discarding {}.", args.src().getAbsolutePath()); iter.remove(); } } } - static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl, - boolean upgradeToIdBasedLayout, File blockRoot, - List idBasedLayoutSingleLinks) throws IOException { + static void linkBlocksHelper(File from, File to, HardLink hl, + boolean upgradeToIdBasedLayout, File blockRoot, + List idBasedLayoutSingleLinks, Map pathCache) + throws IOException { if (!from.exists()) { return; } @@ -1345,8 +1361,18 @@ public boolean accept(File dir, String name) { throw new IOException("Failed to mkdirs " + blockLocation); } } - idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName), - new File(blockLocation, blockName))); + /** + * The destination path is 32x32, so 1024 distinct paths. Therefore + * we cache the destination path and reuse the same File object on + * potentially thousands of blocks located on this volume. + * This method is called recursively so the cache is passed through + * each recursive call. There is one cache per volume, and it is only + * accessed by a single thread so no locking is needed. + */ + File cachedDest = pathCache + .computeIfAbsent(blockLocation, k -> blockLocation); + idBasedLayoutSingleLinks.add(new LinkArgs(from, + cachedDest, blockName)); hl.linkStats.countSingleLinks++; } } else { @@ -1369,8 +1395,8 @@ public boolean accept(File dir, String name) { if (otherNames != null) { for (int i = 0; i < otherNames.length; i++) { linkBlocksHelper(new File(from, otherNames[i]), - new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout, - blockRoot, idBasedLayoutSingleLinks); + new File(to, otherNames[i]), hl, upgradeToIdBasedLayout, + blockRoot, idBasedLayoutSingleLinks, pathCache); } } } From d22cdd768f60fe6cedb4e025972272e24473a369 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Thu, 8 Apr 2021 20:49:08 +0530 Subject: [PATCH 215/666] HDFS-15916. DistCp: Backward compatibility: Distcp fails from Hadoop 3 to Hadoop 2 for snapshotdiff. (#2863). Contributed by Ayush Saxena. 
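A server that does not implement the paginated getSnapshotDiffReportListing RPC (for
example a Hadoop 2.x NameNode) surfaces the call on the client as an
RpcNoSuchMethodException, so the client now catches that and retries with the original
one-shot getSnapshotDiffReport. A simplified sketch of the fallback (here `dfs` is the
underlying DFSClient, the snapshot names are placeholders, and accumulate() is a
hypothetical stand-in for the paging loop in the actual change below):

    SnapshotDiffReport report;
    try {
      // Preferred paginated RPC, available on newer NameNodes.
      report = accumulate(dfs.getSnapshotDiffReportListing(
          snapshotDir, "s1", "s2", startPath, index));
    } catch (RpcNoSuchMethodException e) {
      // Older server: fall back to the single-call report.
      LOG.warn("Falling back to getSnapshotDiffReport {}", e.getMessage());
      report = dfs.getSnapshotDiffReport(snapshotDir, "s1", "s2");
    }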
Signed-off-by: Wei-Chiu Chuang --- .../apache/hadoop/hdfs/DistributedFileSystem.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index fe2d077977ca2..eda92be12c627 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs; +import org.apache.hadoop.ipc.RpcNoSuchMethodException; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -2388,8 +2389,15 @@ private SnapshotDiffReport getSnapshotDiffReportInternal( List deletedList = new ChunkedArrayList<>(); SnapshotDiffReportListing report; do { - report = dfs.getSnapshotDiffReportListing(snapshotDir, fromSnapshot, - toSnapshot, startPath, index); + try { + report = dfs.getSnapshotDiffReportListing(snapshotDir, fromSnapshot, + toSnapshot, startPath, index); + } catch (RpcNoSuchMethodException e) { + // In case the server doesn't support getSnapshotDiffReportListing, + // fallback to getSnapshotDiffReport. + LOG.warn("Falling back to getSnapshotDiffReport {}", e.getMessage()); + return dfs.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot); + } startPath = report.getLastPath(); index = report.getLastIndex(); modifiedList.addAll(report.getModifyList()); From 99fcf27bdb6976d2a2731dece535ae3235019b59 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Thu, 8 Apr 2021 22:14:47 +0530 Subject: [PATCH 216/666] HDFS-15955. 
Make explicit_bzero cross platform (#2875) --- .../native/libhdfspp/lib/bindings/c/hdfs.cc | 3 ++- .../native/libhdfspp/lib/x-platform/syscall.h | 12 ++++++++++ .../libhdfspp/lib/x-platform/syscall_linux.cc | 7 ++++++ .../lib/x-platform/syscall_windows.cc | 8 +++++++ .../native/libhdfspp/tests/hdfs_ext_test.cc | 3 ++- .../native/libhdfspp/tests/hdfspp_mini_dfs.h | 3 ++- .../tests/x-platform/syscall_common_test.cc | 23 +++++++++++++++++++ 7 files changed, 56 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc index efa4c750108b7..80f9316160216 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc @@ -25,6 +25,7 @@ #include "fs/filesystem.h" #include "fs/filehandle.h" #include "x-platform/utils.h" +#include "x-platform/syscall.h" #include #include @@ -1395,7 +1396,7 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations hdfsBlockLocations *locations = new struct hdfsBlockLocations(); (*locations_out) = locations; - explicit_bzero(locations, sizeof(*locations)); + XPlatform::Syscall::ClearBufferSafely(locations, sizeof(*locations)); locations->fileLength = ppLocations->getFileLength(); locations->isLastBlockComplete = ppLocations->isLastBlockComplete(); locations->isUnderConstruction = ppLocations->isUnderConstruction(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h index 297acebfc5c8d..4f94ecbe31dfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h @@ -59,6 +59,18 @@ class Syscall { */ static bool FnMatch(const std::string& pattern, const std::string& str); + /** + * Clears the given {@link buffer} upto {@link sz_bytes} by + * filling them with zeros. This method is immune to compiler + * optimizations and guarantees that the first {@link sz_bytes} of + * {@link buffer} is cleared. The {@link buffer} must be at least + * as big as {@link sz_bytes}, the behaviour is undefined otherwise. + * + * @param buffer the pointer to the buffer to clear. + * @param sz_bytes the count of the bytes to clear. 
+ */ + static void ClearBufferSafely(void* buffer, size_t sz_bytes); + private: static bool WriteToStdoutImpl(const char* message); }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc index 2c51dbfddfca6..9821cc7110b2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc @@ -41,3 +41,10 @@ bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { const auto result = write(1, message, message_len); return result == static_cast(message_len); } + +void XPlatform::Syscall::ClearBufferSafely(void* buffer, + const size_t sz_bytes) { + if (buffer != nullptr) { + explicit_bzero(buffer, sz_bytes); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc index dc9ba63634f1b..5a3423a99f196 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc @@ -17,6 +17,7 @@ */ #include +#include #include #include "syscall.h" @@ -49,3 +50,10 @@ bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { WriteFile(stdout_handle, message, message_len, &bytes_written, nullptr); return result && static_cast(message_len) == bytes_written; } + +void XPlatform::Syscall::ClearBufferSafely(void* buffer, + const size_t sz_bytes) { + if (buffer != nullptr) { + SecureZeroMemory(buffer, sz_bytes); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc index fb55172633af0..34e53842b1605 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc @@ -18,6 +18,7 @@ #include "hdfspp_mini_dfs.h" #include "hdfspp/hdfs_ext.h" +#include "x-platform/syscall.h" #include #include @@ -475,7 +476,7 @@ TEST_F(HdfsExtTest, TestReadStats) { hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0); EXPECT_NE(nullptr, file); void * buf = malloc(size); - explicit_bzero(buf, size); + XPlatform::Syscall::ClearBufferSafely(buf, size); EXPECT_EQ(size, hdfsWrite(fs, file, buf, size)); free(buf); EXPECT_EQ(0, hdfsCloseFile(fs, file)); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h index 98edbdc1d6501..aae8d83563f1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h @@ -19,6 +19,7 @@ #include "hdfs/hdfs.h" #include "hdfspp/hdfspp.h" #include +#include "x-platform/syscall.h" #include #include @@ -92,7 +93,7 @@ class HdfsHandle { hdfsFile file = hdfsOpenFile(*this, path.c_str(), O_WRONLY, 0, 0, 0); EXPECT_NE(nullptr, file); void * 
buf = malloc(size); - explicit_bzero(buf, size); + XPlatform::Syscall::ClearBufferSafely(buf, size); EXPECT_EQ(1024, hdfsWrite(*this, file, buf, size)); EXPECT_EQ(0, hdfsCloseFile(*this, file)); free(buf); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc index 04da29a33fb5c..d68b2afef9eea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc @@ -18,7 +18,9 @@ #include +#include #include +#include #include "x-platform/syscall.h" @@ -45,3 +47,24 @@ TEST(XPlatformSyscall, FnMatchNegativeQuestionMark) { const std::string str("abc.doc"); EXPECT_FALSE(XPlatform::Syscall::FnMatch(pattern, str)); } + +TEST(XPlatformSyscall, ClearBufferSafelyChars) { + std::vector alphabets(26); + std::iota(alphabets.begin(), alphabets.end(), 'a'); + + XPlatform::Syscall::ClearBufferSafely(alphabets.data(), alphabets.size()); + for (const auto alphabet : alphabets) { + EXPECT_EQ(alphabet, '\0'); + } +} + +TEST(XPlatformSyscall, ClearBufferSafelyNumbers) { + std::vector numbers(200); + std::iota(numbers.begin(), numbers.end(), 0); + + XPlatform::Syscall::ClearBufferSafely(numbers.data(), + numbers.size() * sizeof(int)); + for (const auto number : numbers) { + EXPECT_EQ(number, 0); + } +} \ No newline at end of file From ac364a2d2454090754a5811e4773b04d916e3efe Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Fri, 9 Apr 2021 00:34:15 +0000 Subject: [PATCH 217/666] YARN-10503. Support queue capacity in terms of absolute resources with custom resourceType. Contributed by Qi Zhu. 
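With this change an absolute-resource capacity value may carry custom resource types
(GPU, FPGA, or a user-defined type) alongside memory and vcores: the bracketed string is
built and parsed with any extra name=value pairs appended after vcores, and keys that are
not registered resource types are logged as unsupported and skipped. An illustrative
capacity-scheduler entry in that format (queue path and numbers are placeholders, not
part of this patch):

    yarn.scheduler.capacity.root.a.capacity = [memory=1000,vcores=10,yarn.io/gpu=10,yarn.io/fpga=10]
    yarn.scheduler.capacity.root.a.maximum-capacity = [memory=2000,vcores=20,yarn.io/gpu=100,yarn.io/fpga=100]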
--- .../yarn/util/resource/ResourceUtils.java | 15 ++++ .../CapacitySchedulerConfiguration.java | 35 ++++++-- .../TestCSAllocateCustomResource.java | 82 +++++++++++++++++++ .../test/resources/resource-types-test.xml | 2 +- 4 files changed, 126 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java index f7c75a6079ffb..3654965c05df1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -902,4 +902,19 @@ private static void validateResourceTypes( } } } + + public static StringBuilder + getCustomResourcesStrings(Resource resource) { + StringBuilder res = new StringBuilder(); + if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { + ResourceInformation[] resources = + resource.getResources(); + for (int i = 2; i < resources.length; i++) { + ResourceInformation resInfo = resources[i]; + res.append("," + + resInfo.getName() + "=" + resInfo.getValue()); + } + } + return res; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 266cbb4d34601..074e3711ba757 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -2350,11 +2350,14 @@ public void setAutoCreatedLeafQueueTemplateCapacityByLabel(String queuePath, getAutoCreatedQueueTemplateConfPrefix(queuePath); StringBuilder resourceString = new StringBuilder(); + resourceString .append("[" + AbsoluteResourceType.MEMORY.toString().toLowerCase() + "=" + resource.getMemorySize() + "," + AbsoluteResourceType.VCORES.toString().toLowerCase() + "=" - + resource.getVirtualCores() + "]"); + + resource.getVirtualCores() + + ResourceUtils. + getCustomResourcesStrings(resource) + "]"); setCapacityByLabel(leafQueueConfPrefix, label, resourceString.toString()); } @@ -2385,11 +2388,14 @@ public void setAutoCreatedLeafQueueTemplateMaxCapacity(String queuePath, queuePath); StringBuilder resourceString = new StringBuilder(); + resourceString .append("[" + AbsoluteResourceType.MEMORY.toString().toLowerCase() + "=" + resource.getMemorySize() + "," + AbsoluteResourceType.VCORES.toString().toLowerCase() + "=" - + resource.getVirtualCores() + "]"); + + resource.getVirtualCores() + + ResourceUtils. 
+ getCustomResourcesStrings(resource) + "]"); setMaximumCapacityByLabel(leafQueueConfPrefix, label, resourceString.toString()); } @@ -2489,11 +2495,14 @@ private void updateMinMaxResourceToConf(String label, String queue, } StringBuilder resourceString = new StringBuilder(); + resourceString .append("[" + AbsoluteResourceType.MEMORY.toString().toLowerCase() + "=" + resource.getMemorySize() + "," + AbsoluteResourceType.VCORES.toString().toLowerCase() + "=" - + resource.getVirtualCores() + "]"); + + resource.getVirtualCores() + + ResourceUtils. + getCustomResourcesStrings(resource) + "]"); String prefix = getQueuePrefix(queue) + type; if (!label.isEmpty()) { @@ -2567,8 +2576,12 @@ private Resource internalGetLabeledResourceRequirementForQueue(String queue, private void updateResourceValuesFromConfig(Set resourceTypes, Resource resource, String[] splits) { + String resourceName = splits[0].trim(); + // If key is not a valid type, skip it. - if (!resourceTypes.contains(splits[0])) { + if (!resourceTypes.contains(resourceName) + && !ResourceUtils.getResourceTypes().containsKey(resourceName)) { + LOG.error(resourceName + " not supported."); return; } @@ -2581,9 +2594,17 @@ private void updateResourceValuesFromConfig(Set resourceTypes, resourceValue = UnitsConversionUtil.convert(units, "Mi", resourceValue); } + // Custom resource type defined by user. + // Such as GPU FPGA etc. + if (!resourceTypes.contains(resourceName)) { + resource.setResourceInformation(resourceName, ResourceInformation + .newInstance(resourceName, units, resourceValue)); + return; + } + // map it based on key. AbsoluteResourceType resType = AbsoluteResourceType - .valueOf(StringUtils.toUpperCase(splits[0].trim())); + .valueOf(StringUtils.toUpperCase(resourceName)); switch (resType) { case MEMORY : resource.setMemorySize(resourceValue); @@ -2592,8 +2613,8 @@ private void updateResourceValuesFromConfig(Set resourceTypes, resource.setVirtualCores(resourceValue.intValue()); break; default : - resource.setResourceInformation(splits[0].trim(), ResourceInformation - .newInstance(splits[0].trim(), units, resourceValue)); + resource.setResourceInformation(resourceName, ResourceInformation + .newInstance(resourceName, units, resourceValue)); break; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java index 7b0254cdcc5da..36b3c9b4d63ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -50,10 +51,16 @@ import java.io.File; import java.io.IOException; + import 
java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Set; +import java.util.Map; +import java.util.Arrays; +import java.util.stream.Collectors; +import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI; import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB; import static org.junit.Assert.assertEquals; @@ -248,4 +255,79 @@ public void testClusterMetricsWithGPU() .get(GPU_URI)).longValue(), 0); ClusterMetrics.destroy(); } + + /** + * Test CS absolute conf with Custom resource type. + * */ + @Test + public void testCapacitySchedulerAbsoluteConfWithCustomResourceType() + throws IOException { + // reset resource types + ResourceUtils.resetResourceTypes(); + String resourceTypesFileName = "resource-types-test.xml"; + File source = new File( + conf.getClassLoader().getResource(resourceTypesFileName).getFile()); + resourceTypesFile = new File(source.getParent(), "resource-types.xml"); + FileUtils.copyFile(source, resourceTypesFile); + + CapacitySchedulerConfiguration newConf = + new CapacitySchedulerConfiguration(conf); + + // Only memory vcores for first class. + Set resourceTypes = Arrays. + stream(CapacitySchedulerConfiguration. + AbsoluteResourceType.values()). + map(value -> value.toString().toLowerCase()). + collect(Collectors.toSet()); + + Map valuesMin = Maps.newHashMap(); + valuesMin.put(GPU_URI, 10L); + valuesMin.put(FPGA_URI, 10L); + valuesMin.put("testType", 10L); + + Map valuesMax = Maps.newHashMap(); + valuesMax.put(GPU_URI, 100L); + valuesMax.put(FPGA_URI, 100L); + valuesMax.put("testType", 100L); + + Resource aMINRES = + Resource.newInstance(1000, 10, valuesMin); + + Resource aMAXRES = + Resource.newInstance(1000, 10, valuesMax); + + // Define top-level queues + newConf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"a", "b", "c"}); + newConf.setMinimumResourceRequirement("", "root.a", + aMINRES); + newConf.setMaximumResourceRequirement("", "root.a", + aMAXRES); + + newConf.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class, ResourceCalculator.class); + + //start RM + MockRM rm = new MockRM(newConf); + rm.start(); + + // Check the gpu resource conf is right. + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + Assert.assertEquals(aMINRES, + cs.getConfiguration(). + getMinimumResourceRequirement("", "root.a", resourceTypes)); + Assert.assertEquals(aMAXRES, + cs.getConfiguration(). + getMaximumResourceRequirement("", "root.a", resourceTypes)); + + // Check the gpu resource of queue is right. + Assert.assertEquals(aMINRES, cs.getQueue("root.a"). + getQueueResourceQuotas().getConfiguredMinResource()); + Assert.assertEquals(aMAXRES, cs.getQueue("root.a"). 
+ getQueueResourceQuotas().getConfiguredMaxResource()); + + rm.close(); + + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml index be4f07428c521..2ae7531172b4a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/resource-types-test.xml @@ -17,6 +17,6 @@ yarn.resource-types - yarn.io/gpu, yarn.io/fpga + yarn.io/gpu, yarn.io/fpga, testType \ No newline at end of file From 9b4b61724ab772d62dd5e289aa107a4644546de0 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 9 Apr 2021 07:40:18 +0530 Subject: [PATCH 218/666] HDFS-15940. Fix TestBlockRecovery2#testRaceBetweenReplicaRecoveryAndFinalizeBlock (ADDENDUM) (#2874) --- .../hadoop/hdfs/server/datanode/TestBlockRecovery2.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java index 03d5851f23257..8d2df18711256 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -229,8 +229,8 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() tearDown(); Configuration configuration = new HdfsConfiguration(); - configuration.set( - DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000"); + configuration.setLong( + DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, 5000L); MiniDFSCluster cluster = new MiniDFSCluster.Builder(configuration) .numDataNodes(1).build(); try { @@ -257,6 +257,7 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() dataNode.initReplicaRecovery(recoveringBlock); } } catch (Exception e) { + LOG.error("Something went wrong.", e); recoveryInitResult.set(false); } }); From e8021e648bdc6d28b453aee3603c7c56705d538f Mon Sep 17 00:00:00 2001 From: lichaojacobs Date: Fri, 9 Apr 2021 10:58:53 +0800 Subject: [PATCH 219/666] MAPREDUCE-7329: HadoopPipes task may fail when linux kernel version change from 3.x to 4.x (#2775) --- .../hadoop/mapred/pipes/Application.java | 52 ++++++++++++ .../mapred/pipes/TestPipeApplication.java | 83 +++++++++++++++++++ 2 files changed, 135 insertions(+) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java index 83d250981f71e..5416d2693682d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java @@ -30,12 +30,14 @@ import javax.crypto.SecretKey; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; @@ -52,6 +54,7 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; @@ -66,6 +69,7 @@ class Application handler; @@ -133,6 +137,13 @@ class Application expectedClosedCount == cleaner.getCloseSocketCount(), 100, 5000); + } + + @Test + public void testSocketTimeout() throws Exception { + ServerSocket serverSocket = setupServerSocket(); + SocketCleaner cleaner = setupCleaner(serverSocket, 100); + try { + new Socket(serverSocket.getInetAddress(), serverSocket.getLocalPort()); + Thread.sleep(1000); + } catch (Exception exception) { + // ignored... + } + GenericTestUtils.waitFor(() -> 1 == cleaner.getCloseSocketCount(), 100, + 5000); + } + + private SocketCleaner setupCleaner(ServerSocket serverSocket) { + return setupCleaner(serverSocket, + CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT); + } + + private SocketCleaner setupCleaner(ServerSocket serverSocket, int soTimeout) { + // start socket cleaner. + SocketCleaner cleaner = new SocketCleaner("test-ping-socket-cleaner", + serverSocket, soTimeout); + cleaner.setDaemon(true); + cleaner.start(); + + return cleaner; + } + + private static class SocketCleaner extends PingSocketCleaner { + private int closeSocketCount = 0; + + SocketCleaner(String name, ServerSocket serverSocket, int soTimeout) { + super(name, serverSocket, soTimeout); + } + + @Override + public void run() { + super.run(); + } + + protected void closeSocketInternal(Socket clientSocket) { + if (!clientSocket.isClosed()) { + closeSocketCount++; + } + super.closeSocketInternal(clientSocket); + } + + public int getCloseSocketCount() { + return closeSocketCount; + } + } + + private ServerSocket setupServerSocket() throws Exception { + return new ServerSocket(0, 1); + } + /** * clean previous std error and outs */ From 7b11dd7c95a301675e853560d9af8ed6c1bc93f4 Mon Sep 17 00:00:00 2001 From: sumangala-patki <70206833+sumangala-patki@users.noreply.github.com> Date: Fri, 9 Apr 2021 09:31:23 +0530 Subject: [PATCH 220/666] HADOOP-17576. 
ABFS: Disable throttling update for auth failures (#2761) Contributed by Sumangala Patki --- .../azurebfs/services/AbfsRestOperation.java | 24 ++++------ .../ITestAbfsRestOperationException.java | 47 +++++++++++++++++-- 2 files changed, 52 insertions(+), 19 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 24ec2926647e6..584b71f1ee5bf 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -34,7 +34,6 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidAbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; -import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator.HttpException; /** * The AbfsRestOperation for Rest AbfsClient. @@ -233,7 +232,13 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS hasRequestBody ? bufferLength : 0); break; } + } catch (IOException e) { + LOG.debug("Auth failure: {}, {}", method, url); + throw new AbfsRestOperationException(-1, null, + "Auth failure: " + e.getMessage(), e); + } + try { // dump the headers AbfsIoUtils.dumpHeadersToDebugLog("Request Headers", httpOperation.getConnection().getRequestProperties()); @@ -256,9 +261,7 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS } } catch (UnknownHostException ex) { String hostname = null; - if (httpOperation != null) { - hostname = httpOperation.getHost(); - } + hostname = httpOperation.getHost(); LOG.warn("Unknown host name: %s. 
Retrying to resolve the host name...", hostname); if (!client.getRetryPolicy().shouldRetry(retryCount, -1)) { @@ -267,24 +270,13 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS return false; } catch (IOException ex) { if (LOG.isDebugEnabled()) { - if (httpOperation != null) { - LOG.debug("HttpRequestFailure: " + httpOperation.toString(), ex); - } else { - LOG.debug("HttpRequestFailure: " + method + "," + url, ex); - } + LOG.debug("HttpRequestFailure: {}, {}", httpOperation.toString(), ex); } if (!client.getRetryPolicy().shouldRetry(retryCount, -1)) { throw new InvalidAbfsRestOperationException(ex); } - // once HttpException is thrown by AzureADAuthenticator, - // it indicates the policy in AzureADAuthenticator determined - // retry is not needed - if (ex instanceof HttpException) { - throw new AbfsRestOperationException((HttpException) ex); - } - return false; } finally { AbfsClientThrottlingIntercept.updateMetrics(operationType, httpOperation); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java index 1d86de7ebeb5d..a71e7bc815f75 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java @@ -20,21 +20,31 @@ import java.io.IOException; +import org.assertj.core.api.Assertions; +import org.junit.Assert; +import org.junit.Test; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode; import org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.Assert; -import org.junit.Test; - +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.DOT; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ABFS_ACCOUNT_NAME; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Verify the AbfsRestOperationException error message format. 
* */ public class ITestAbfsRestOperationException extends AbstractAbfsIntegrationTest{ + private static final String RETRY_TEST_TOKEN_PROVIDER = "org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider"; + public ITestAbfsRestOperationException() throws Exception { super(); } @@ -114,4 +124,35 @@ public void testWithDifferentCustomTokenFetchRetry(int numOfRetries) throws Exce + ") done, does not match with fs.azure.custom.token.fetch.retry.count configured (" + numOfRetries + ")", RetryTestTokenProvider.reTryCount == numOfRetries); } + + @Test + public void testAuthFailException() throws Exception { + Configuration config = new Configuration(getRawConfiguration()); + String accountName = config + .get(FS_AZURE_ABFS_ACCOUNT_NAME); + // Setup to configure custom token provider + config.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME + DOT + + accountName, "Custom"); + config.set( + FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME + DOT + accountName, + RETRY_TEST_TOKEN_PROVIDER); + // Stop filesystem creation as it will lead to calls to store. + config.set(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, "false"); + + final AzureBlobFileSystem fs = getFileSystem(config); + try { + fs.getFileStatus(new Path("/")); + fail("Should fail at auth token fetch call"); + } catch (AbfsRestOperationException e) { + String errorDesc = "Should throw RestOp exception on AAD failure"; + Assertions.assertThat(e.getStatusCode()) + .describedAs("Incorrect status code. " + errorDesc).isEqualTo(-1); + Assertions.assertThat(e.getErrorCode()) + .describedAs("Incorrect error code. " + errorDesc) + .isEqualTo(AzureServiceErrorCode.UNKNOWN); + Assertions.assertThat(e.getErrorMessage()) + .describedAs("Incorrect error message. " + errorDesc) + .contains("Auth failure: "); + } + } } \ No newline at end of file From 50b1b459b7430de5ff56700ee782c3947cbdac87 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 9 Apr 2021 15:53:51 +0900 Subject: [PATCH 221/666] MAPREDUCE-7334. TestJobEndNotifier fails. (#2877) --- .../test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java index 75893f5660699..e3d1241cdc679 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java @@ -80,7 +80,7 @@ public void doGet(HttpServletRequest request, calledTimes++; try { // Sleep for a long time - Thread.sleep(1000000); + Thread.sleep(3000); } catch (InterruptedException e) { timedOut = true; } From c357cd8ae34bc42b8003b7d9295561e844bd1d09 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Fri, 9 Apr 2021 21:31:33 +0530 Subject: [PATCH 222/666] HDFS-15962. 
Make strcasecmp cross platform (#2883) --- .../libhdfspp/lib/common/CMakeLists.txt | 6 +++-- .../libhdfspp/lib/common/configuration.cc | 5 ++-- .../lib/common/configuration_loader.cc | 7 ++--- .../native/libhdfspp/lib/fs/filesystem.cc | 2 +- .../native/libhdfspp/lib/rpc/CMakeLists.txt | 5 ++-- .../native/libhdfspp/lib/rpc/sasl_protocol.cc | 26 +++++++++---------- .../native/libhdfspp/lib/x-platform/syscall.h | 13 ++++++++++ .../libhdfspp/lib/x-platform/syscall_linux.cc | 6 +++++ .../lib/x-platform/syscall_windows.cc | 7 +++++ .../tests/x-platform/syscall_common_test.cc | 19 +++++++++++++- 10 files changed, 71 insertions(+), 25 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt index 87779e7f8ae81..6bd7a266fbf51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/CMakeLists.txt @@ -20,6 +20,8 @@ if(NEED_LINK_DL) endif() include_directories(${Boost_INCLUDE_DIRS} ../../include) -add_library(common_obj OBJECT status.cc sasl_digest_md5.cc ioservice_impl.cc options.cc configuration.cc configuration_loader.cc hdfs_configuration.cc uri.cc util.cc retry_policy.cc cancel_tracker.cc logging.cc libhdfs_events_impl.cc auth_info.cc namenode_info.cc statinfo.cc fsinfo.cc content_summary.cc locks.cc config_parser.cc) -add_library(common $ $) +add_library(common_obj OBJECT $ status.cc sasl_digest_md5.cc ioservice_impl.cc options.cc configuration.cc configuration_loader.cc hdfs_configuration.cc uri.cc util.cc retry_policy.cc cancel_tracker.cc logging.cc libhdfs_events_impl.cc auth_info.cc namenode_info.cc statinfo.cc fsinfo.cc content_summary.cc locks.cc config_parser.cc) +add_library(common $ $ $) target_link_libraries(common ${LIB_DL}) +target_include_directories(common_obj PRIVATE ../../lib) +target_include_directories(common PRIVATE ../../lib) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc index 298de1e3aabfe..947214bdbd50a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc @@ -33,6 +33,7 @@ #include "configuration.h" #include "hdfspp/uri.h" +#include "x-platform/syscall.h" #include #include @@ -124,10 +125,10 @@ optional Configuration::GetBool(const std::string& key) const { return optional(); } - if (!strcasecmp(raw->c_str(), "true")) { + if (XPlatform::Syscall::StringCompareIgnoreCase(*raw, "true")) { return std::experimental::make_optional(true); } - if (!strcasecmp(raw->c_str(), "false")) { + if (XPlatform::Syscall::StringCompareIgnoreCase(*raw, "false")) { return std::experimental::make_optional(false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc index 691d2ff719542..5301137505af1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc +++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc @@ -18,6 +18,7 @@ #include "configuration_loader.h" #include "common/logging.h" +#include "x-platform/syscall.h" #include #include @@ -46,17 +47,17 @@ bool is_valid_bool(const std::string& raw) { return false; } - if (!strcasecmp(raw.c_str(), "true")) { + if (XPlatform::Syscall::StringCompareIgnoreCase(raw, "true")) { return true; } - if (!strcasecmp(raw.c_str(), "false")) { + if (XPlatform::Syscall::StringCompareIgnoreCase(raw, "false")) { return true; } return false; } bool str_to_bool(const std::string& raw) { - if (!strcasecmp(raw.c_str(), "true")) { + if (XPlatform::Syscall::StringCompareIgnoreCase(raw, "true")) { return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc index 741d6c783b62e..e92a9ee48d6b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc @@ -298,7 +298,7 @@ void FileSystemImpl::Connect(const std::string &server, void FileSystemImpl::ConnectToDefaultFs(const std::function &handler) { std::string scheme = options_.defaultFS.get_scheme(); - if (strcasecmp(scheme.c_str(), "hdfs") != 0) { + if (!XPlatform::Syscall::StringCompareIgnoreCase(scheme, "hdfs")) { std::string error_message; error_message += "defaultFS of [" + options_.defaultFS.str() + "] is not supported"; handler(Status::InvalidArgument(error_message.c_str()), nullptr); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/CMakeLists.txt index e5a26fb449da4..b50134eda9536 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/CMakeLists.txt @@ -16,7 +16,7 @@ # limitations under the License. 
# -list(APPEND rpc_object_items rpc_connection_impl.cc rpc_engine.cc namenode_tracker.cc request.cc sasl_protocol.cc sasl_engine.cc) +list(APPEND rpc_object_items $ rpc_connection_impl.cc rpc_engine.cc namenode_tracker.cc request.cc sasl_protocol.cc sasl_engine.cc) if (CMAKE_USING_CYRUS_SASL) list(APPEND rpc_object_items cyrus_sasl_engine.cc) endif (CMAKE_USING_CYRUS_SASL) @@ -25,7 +25,8 @@ if (CMAKE_USING_GSASL) endif (CMAKE_USING_GSASL) add_library(rpc_obj OBJECT ${rpc_object_items}) - +target_include_directories(rpc_obj PRIVATE ../../lib) add_dependencies(rpc_obj proto) add_library(rpc $) +target_include_directories(rpc PRIVATE ../../lib) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc index 6fc04f754f6cb..bc9adbff313d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/sasl_protocol.cc @@ -20,6 +20,7 @@ #include "rpc_connection.h" #include "common/logging.h" #include "common/optional_wrapper.h" +#include "x-platform/syscall.h" #include "sasl_engine.h" #include "sasl_protocol.h" @@ -97,20 +98,17 @@ void SaslProtocol::Authenticate(std::function +#include #include #include @@ -48,3 +49,8 @@ void XPlatform::Syscall::ClearBufferSafely(void* buffer, explicit_bzero(buffer, sz_bytes); } } + +bool XPlatform::Syscall::StringCompareIgnoreCase(const std::string& a, + const std::string& b) { + return strcasecmp(a.c_str(), b.c_str()) == 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc index 5a3423a99f196..2cd9e9d5157df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc @@ -20,6 +20,8 @@ #include #include +#include + #include "syscall.h" #pragma comment(lib, "Shlwapi.lib") @@ -57,3 +59,8 @@ void XPlatform::Syscall::ClearBufferSafely(void* buffer, SecureZeroMemory(buffer, sz_bytes); } } + +bool XPlatform::Syscall::StringCompareIgnoreCase(const std::string& a, + const std::string& b) { + return _stricmp(a.c_str(), b.c_str()) == 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc index d68b2afef9eea..7fa3971cf7edd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc @@ -67,4 +67,21 @@ TEST(XPlatformSyscall, ClearBufferSafelyNumbers) { for (const auto number : numbers) { EXPECT_EQ(number, 0); } -} \ No newline at end of file +} + +TEST(XPlatformSyscall, StringCompareIgnoreCaseBasic) { + EXPECT_TRUE(XPlatform::Syscall::StringCompareIgnoreCase("aBcDeF", "AbCdEf")); + EXPECT_TRUE(XPlatform::Syscall::StringCompareIgnoreCase("a1B2c3D4e5F", + "A1b2C3d4E5f")); + EXPECT_TRUE(XPlatform::Syscall::StringCompareIgnoreCase( + "a!1@B#2$c%3^D&4*e(5)F", "A!1@b#2$C%3^d&4*E(5)f")); + 
EXPECT_TRUE(XPlatform::Syscall::StringCompareIgnoreCase( + "a1@B#2$c%3^D&4*e(5)F?:", "A1@b#2$C%3^d&4*E(5)f?:")); + EXPECT_TRUE(XPlatform::Syscall::StringCompareIgnoreCase("12345", "12345")); + EXPECT_TRUE(XPlatform::Syscall::StringCompareIgnoreCase("", "")); +} + +TEST(XPlatformSyscall, StringCompareIgnoreCaseNegative) { + EXPECT_FALSE(XPlatform::Syscall::StringCompareIgnoreCase("abcd", "abcde")); + EXPECT_FALSE(XPlatform::Syscall::StringCompareIgnoreCase("12345", "abcde")); +} From 88247195db9e5b21b18bec1d41cb6dbe9f7d1840 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 11 Apr 2021 01:30:49 +0900 Subject: [PATCH 223/666] Bump y18n (#2843) Bumps [y18n](https://github.com/yargs/y18n) from 3.2.1 to 3.2.2. - [Release notes](https://github.com/yargs/y18n/releases) - [Changelog](https://github.com/yargs/y18n/blob/master/CHANGELOG.md) - [Commits](https://github.com/yargs/y18n/commits) Signed-off-by: dependabot[bot] Signed-off-by: Akira Ajisaka --- .../hadoop-yarn-ui/src/main/webapp/yarn.lock | 28 ++++++------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index bbafc667a30ef..1d36e689da053 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -79,11 +79,6 @@ amdefine@>=0.0.4: resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" integrity sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU= -ansi-regex@*, ansi-regex@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75" - integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg== - ansi-regex@^0.2.0, ansi-regex@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9" @@ -6163,16 +6158,16 @@ spdx-expression-parse@^3.0.0: spdx-exceptions "^2.1.0" spdx-license-ids "^3.0.0" -spdx-license-ids@*, spdx-license-ids@^3.0.0: - version "3.0.5" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654" - integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q== - spdx-license-ids@^1.0.0: version "1.2.2" resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57" integrity sha1-yd96NCRZSt5r0RkA1ZZpbcBrrFc= +spdx-license-ids@^3.0.0: + version "3.0.5" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654" + integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q== + spdx@~0.4.1: version "0.4.3" resolved "https://registry.yarnpkg.com/spdx/-/spdx-0.4.3.tgz#ab373c3fcf7b84ffd8fdeb0592d24ff0d14812e4" @@ -6276,13 +6271,6 @@ stringstream@~0.0.4: resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.6.tgz#7880225b0d4ad10e30927d167a1d6f2fd3b33a72" integrity sha512-87GEBAkegbBcweToUrdzf3eLhWNg06FJTebl4BVJz/JgWy8CvEr9dRtX5qWphiynMSQlxxi+QqN0z5T32SLlhA== -strip-ansi@*: - version "6.0.0" - resolved 
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" - integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== - dependencies: - ansi-regex "^5.0.0" - strip-ansi@^0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220" @@ -6906,9 +6894,9 @@ xtend@^4.0.0: integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== y18n@^3.2.0, y18n@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41" - integrity sha1-bRX7qITAhnnA136I53WegR4H+kE= + version "3.2.2" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.2.tgz#85c901bd6470ce71fc4bb723ad209b70f7f28696" + integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ== yallist@^2.1.2: version "2.1.2" From 45bba39ff9623db1156f6e1518592779fdffa5b9 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 12 Apr 2021 12:53:58 +0900 Subject: [PATCH 224/666] HADOOP-17608. Fix TestKMS failure (#2880) Reviewed-by: Masatake Iwasaki --- .../hadoop/crypto/key/kms/server/TestKMS.java | 52 +------------------ 1 file changed, 1 insertion(+), 51 deletions(-) diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index 3d59e6f5be7b7..a0a58ff3567f5 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; @@ -92,7 +91,6 @@ import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.LinkedBlockingQueue; @@ -113,9 +111,6 @@ public class TestKMS { private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class); - private static final String SSL_RELOADER_THREAD_NAME = - "Truststore reloader thread"; - private SSLFactory sslFactory; // Keep track of all key providers created during a test case, so they can be @@ -540,34 +535,6 @@ public Void call() throws Exception { url.getProtocol().equals("https")); final URI uri = createKMSUri(getKMSUrl()); - if (ssl) { - KeyProvider testKp = createProvider(uri, conf); - ThreadGroup threadGroup = Thread.currentThread().getThreadGroup(); - while (threadGroup.getParent() != null) { - threadGroup = threadGroup.getParent(); - } - Thread[] threads = new Thread[threadGroup.activeCount()]; - threadGroup.enumerate(threads); - Thread reloaderThread = null; - for (Thread thread : threads) { - if ((thread.getName() != null) - && (thread.getName().contains(SSL_RELOADER_THREAD_NAME))) { - reloaderThread = thread; - } - } - Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive()); - // Explicitly close the provider so we can verify the internal thread - // is shutdown - testKp.close(); - boolean reloaderStillAlive = true; - for (int i = 0; i < 10; i++) { - reloaderStillAlive 
= reloaderThread.isAlive(); - if (!reloaderStillAlive) break; - Thread.sleep(1000); - } - Assert.assertFalse("Reloader is still alive", reloaderStillAlive); - } - if (kerberos) { for (String user : new String[]{"client", "client/host"}) { doAs(user, new PrivilegedExceptionAction() { @@ -2363,8 +2330,7 @@ public Void run() throws Exception { return null; } }); - // Close the client provider. We will verify all providers' - // Truststore reloader threads are closed later. + // Close the client provider. kp.close(); return null; } finally { @@ -2375,22 +2341,6 @@ public Void run() throws Exception { return null; } }); - - // verify that providers created by KMSTokenRenewer are closed. - if (ssl) { - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - final Set threadSet = Thread.getAllStackTraces().keySet(); - for (Thread t : threadSet) { - if (t.getName().contains(SSL_RELOADER_THREAD_NAME)) { - return false; - } - } - return true; - } - }, 1000, 10000); - } } @Test From d6b554781a4b0dfca54e4aec56ac350698a2b10c Mon Sep 17 00:00:00 2001 From: lfengnan Date: Mon, 12 Apr 2021 12:42:33 -0700 Subject: [PATCH 225/666] HDFS-15423 RBF: WebHDFS create shouldn't choose DN from all sub-clusters (#2605) --- .../router/RouterWebHdfsMethods.java | 47 +++++- .../router/web/RouterWebHDFSContract.java | 2 + .../federation/MiniRouterDFSCluster.java | 19 +-- .../router/TestRouterWebHdfsMethods.java | 147 ++++++++++++++++++ 4 files changed, 198 insertions(+), 17 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index f6ac70c368a52..afc4a3d8fac21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -93,6 +93,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,6 +105,8 @@ import java.util.HashSet; import java.util.List; import java.util.Random; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * WebHDFS Router implementation. 
This is an extension of @@ -453,21 +456,33 @@ private DatanodeInfo chooseDatanode(final Router router, final String path, final HttpOpParam.Op op, final long openOffset, final String excludeDatanodes) throws IOException { final RouterRpcServer rpcServer = getRPCServer(router); - DatanodeInfo[] dns = null; + DatanodeInfo[] dns = {}; + String resolvedNs = ""; try { dns = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); } catch (IOException e) { LOG.error("Cannot get the datanodes from the RPC server", e); } + if (op == PutOpParam.Op.CREATE) { + try { + resolvedNs = rpcServer.getCreateLocation(path).getNameserviceId(); + } catch (IOException e) { + LOG.error("Cannot get the name service " + + "to create file for path {} ", path, e); + } + } + HashSet excludes = new HashSet(); - if (excludeDatanodes != null) { - Collection collection = - getTrimmedStringCollection(excludeDatanodes); - for (DatanodeInfo dn : dns) { - if (collection.contains(dn.getName())) { - excludes.add(dn); - } + Collection collection = + getTrimmedStringCollection(excludeDatanodes); + for (DatanodeInfo dn : dns) { + String ns = getNsFromDataNodeNetworkLocation(dn.getNetworkLocation()); + if (collection.contains(dn.getName())) { + excludes.add(dn); + } else if (op == PutOpParam.Op.CREATE && !ns.equals(resolvedNs)) { + // for CREATE, the dest dn should be in the resolved ns + excludes.add(dn); } } @@ -502,6 +517,22 @@ private DatanodeInfo chooseDatanode(final Router router, return getRandomDatanode(dns, excludes); } + /** + * Get the nameservice info from datanode network location. + * @param location network location with format `/ns0/rack1` + * @return nameservice this datanode is in + */ + @VisibleForTesting + public static String getNsFromDataNodeNetworkLocation(String location) { + // network location should be in the format of /ns/rack + Pattern pattern = Pattern.compile("^/([^/]*)/"); + Matcher matcher = pattern.matcher(location); + if (matcher.find()) { + return matcher.group(1); + } + return ""; + } + /** * Get a random Datanode from a subcluster. * @param dns Nodes to be chosen from. 
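The hunk above narrows WebHDFS datanode selection in the Router: for a CREATE operation the destination nameservice is resolved for the path first, and any live datanode whose network location does not belong to that nameservice is added to the exclude set alongside the explicitly excluded nodes. The nameservice is read from the first component of the `/ns/rack` network location by the new getNsFromDataNodeNetworkLocation helper. A minimal, self-contained sketch of that selection rule follows; the String-based stand-ins for datanode entries and the hard-coded locations are illustrative assumptions, not types or values from the patch.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CreateDnSelectionSketch {
      // Same rule as the patch: the nameservice is the first component of "/ns/rack".
      static String nsFromNetworkLocation(String location) {
        Matcher m = Pattern.compile("^/([^/]*)/").matcher(location);
        return m.find() ? m.group(1) : "";
      }

      public static void main(String[] args) {
        // hypothetical network locations of live datanodes reported to the router
        List<String> dnLocations = Arrays.asList("/ns0/rack1", "/ns0/rack2", "/ns1/rack1");
        String resolvedNs = "ns0"; // nameservice resolved for the path being created

        List<String> candidates = new ArrayList<>();
        for (String loc : dnLocations) {
          // for CREATE, keep only datanodes in the resolved nameservice
          if (nsFromNetworkLocation(loc).equals(resolvedNs)) {
            candidates.add(loc);
          }
        }
        System.out.println(candidates); // prints [/ns0/rack1, /ns0/rack2]
      }
    }

The same parsing rule is what the new testGetNsFromDataNodeNetworkLocation test below exercises with locations such as "/ns0/rack-info1" and "/row0".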
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java index 1d308073290d4..6b90faecc78f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java @@ -64,6 +64,8 @@ public static void createCluster(Configuration conf) throws IOException { conf.addResource(CONTRACT_WEBHDFS_XML); cluster = new MiniRouterDFSCluster(true, 2, conf); + cluster.setIndependentDNs(); + cluster.setNumDatanodesPerNameservice(3); // Start NNs and DNs and wait until ready cluster.startCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 896d08f2c49b6..8a7a03e018b95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -774,6 +774,15 @@ public void startCluster(Configuration overrideConf) { } topology.setFederation(true); + // Generate conf for namenodes and datanodes + String ns0 = nameservices.get(0); + Configuration nnConf = generateNamenodeConfiguration(ns0); + if (overrideConf != null) { + nnConf.addResource(overrideConf); + // Router also uses this configurations as initial values. + routerConf = new Configuration(overrideConf); + } + // Set independent DNs across subclusters int numDNs = nameservices.size() * numDatanodesPerNameservice; Configuration[] dnConfs = null; @@ -781,7 +790,7 @@ public void startCluster(Configuration overrideConf) { dnConfs = new Configuration[numDNs]; int dnId = 0; for (String nsId : nameservices) { - Configuration subclusterConf = new Configuration(); + Configuration subclusterConf = new Configuration(nnConf); subclusterConf.set(DFS_INTERNAL_NAMESERVICES_KEY, nsId); for (int i = 0; i < numDatanodesPerNameservice; i++) { dnConfs[dnId] = subclusterConf; @@ -791,14 +800,6 @@ public void startCluster(Configuration overrideConf) { } // Start mini DFS cluster - String ns0 = nameservices.get(0); - Configuration nnConf = generateNamenodeConfiguration(ns0); - if (overrideConf != null) { - nnConf.addResource(overrideConf); - // Router also uses this configurations as initial values. - routerConf = new Configuration(overrideConf); - } - cluster = new MiniDFSCluster.Builder(nnConf) .numDataNodes(numDNs) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java new file mode 100644 index 0000000000000..7028928041452 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createMountTableEntry; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Collections; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test suite for Router Web Hdfs methods. + */ +public class TestRouterWebHdfsMethods { + static final Logger LOG = + LoggerFactory.getLogger(TestRouterWebHdfsMethods.class); + + private static StateStoreDFSCluster cluster; + private static RouterContext router; + private static String httpUri; + + @BeforeClass + public static void globalSetUp() throws Exception { + cluster = new StateStoreDFSCluster(false, 2); + Configuration conf = new RouterConfigBuilder() + .stateStore() + .rpc() + .http() + .admin() + .build(); + cluster.addRouterOverrides(conf); + cluster.startCluster(); + cluster.startRouters(); + cluster.waitClusterUp(); + router = cluster.getRandomRouter(); + httpUri = "http://"+router.getHttpAddress(); + } + + @AfterClass + public static void tearDown() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Test + public void testWebHdfsCreate() throws Exception { + // the file is created at default ns (ns0) + String path = "/tmp/file"; + URL url = new URL(getUri(path)); + LOG.info("URL: {}", url); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("PUT"); + assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); + verifyFile("ns0", path, true); + verifyFile("ns1", path, false); + conn.disconnect(); + } + + @Test + public void testWebHdfsCreateWithMounts() throws Exception { + // the file is created at mounted ns (ns1) + String mountPoint = "/tmp-ns1"; + String path = "/tmp-ns1/file"; + createMountTableEntry( + router.getRouter(), mountPoint, + DestinationOrder.RANDOM, Collections.singletonList("ns1")); + URL url = new URL(getUri(path)); + LOG.info("URL: {}", url); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("PUT"); + assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); + verifyFile("ns1", path, true); 
+ verifyFile("ns0", path, false); + conn.disconnect(); + } + + private String getUri(String path) { + final String user = System.getProperty("user.name"); + final StringBuilder uri = new StringBuilder(httpUri); + uri.append("/webhdfs/v1"). + append(path). + append("?op=CREATE"). + append("&user.name=" + user); + return uri.toString(); + } + + private void verifyFile(String ns, String path, boolean shouldExist) + throws Exception { + FileSystem fs = cluster.getNamenode(ns, null).getFileSystem(); + try { + fs.getFileStatus(new Path(path)); + if (!shouldExist) { + fail(path + " should not exist in ns " + ns); + } + } catch (FileNotFoundException e) { + if (shouldExist) { + fail(path + " should exist in ns " + ns); + } + } + } + + @Test + public void testGetNsFromDataNodeNetworkLocation() { + assertEquals("ns0", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("/ns0/rack-info1")); + assertEquals("ns0", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("/ns0/row1/rack-info1")); + assertEquals("", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("/row0")); + assertEquals("", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("whatever-rack-info1")); + } +} From 4d835241bb32346f558348e0f2f5718ec972423f Mon Sep 17 00:00:00 2001 From: billierinaldi Date: Mon, 12 Apr 2021 19:47:59 -0400 Subject: [PATCH 226/666] HADOOP-16948. Support infinite lease dirs. (#1925) * HADOOP-16948. Support single writer dirs. * HADOOP-16948. Fix findbugs and checkstyle problems. * HADOOP-16948. Fix remaining checkstyle problems. * HADOOP-16948. Add DurationInfo, retry policy for acquiring lease, and javadocs * HADOOP-16948. Convert ABFS client to use an executor for lease ops * HADOOP-16948. Fix ABFS lease test for non-HNS * HADOOP-16948. Fix checkstyle and javadoc * HADOOP-16948. Address review comments * HADOOP-16948. Use daemon threads for ABFS lease ops * HADOOP-16948. Make lease duration configurable * HADOOP-16948. Add error messages to test assertions * HADOOP-16948. Remove extra isSingleWriterKey call * HADOOP-16948. Use only infinite lease duration due to cost of renewal ops * HADOOP-16948. Remove acquire/renew/release lease methods * HADOOP-16948. Rename single writer dirs to infinite lease dirs * HADOOP-16948. Fix checkstyle * HADOOP-16948. Wait for acquire lease future * HADOOP-16948. 
Add unit test for acquire lease failure --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 35 ++ .../fs/azurebfs/AzureBlobFileSystem.java | 21 ++ .../fs/azurebfs/AzureBlobFileSystemStore.java | 90 ++++- .../azurebfs/constants/AbfsHttpConstants.java | 5 + .../azurebfs/constants/ConfigurationKeys.java | 9 + .../constants/FileSystemConfigurations.java | 7 + .../constants/HttpHeaderConfigurations.java | 5 + .../ConfigurationValidationAnnotations.java | 16 + .../AzureBlobFileSystemException.java | 4 + .../services/AppendRequestParameters.java | 8 +- .../IntegerConfigurationBasicValidator.java | 13 +- .../fs/azurebfs/services/AbfsClient.java | 131 ++++++- .../fs/azurebfs/services/AbfsErrors.java | 53 +++ .../fs/azurebfs/services/AbfsLease.java | 188 ++++++++++ .../azurebfs/services/AbfsOutputStream.java | 37 +- .../services/AbfsOutputStreamContext.java | 18 + .../azurebfs/services/AbfsRestOperation.java | 1 + .../services/AbfsRestOperationType.java | 3 +- .../hadoop-azure/src/site/markdown/abfs.md | 16 + .../ITestAzureBlobFileSystemLease.java | 336 ++++++++++++++++++ .../TestConfigurationValidators.java | 29 +- .../services/TestAbfsOutputStream.java | 49 +-- 22 files changed, 1032 insertions(+), 42 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java create mode 100644 hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index f36cc7d5bfde0..0a8224aaaeb58 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.AuthConfigurations; import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.IntegerConfigurationValidatorAnnotation; +import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.IntegerWithOutlierConfigurationValidatorAnnotation; import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.LongConfigurationValidatorAnnotation; import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.StringConfigurationValidatorAnnotation; import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.Base64StringConfigurationValidatorAnnotation; @@ -208,6 +209,15 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES) private String azureAppendBlobDirs; + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_INFINITE_LEASE_KEY, + DefaultValue = DEFAULT_FS_AZURE_INFINITE_LEASE_DIRECTORIES) + private String azureInfiniteLeaseDirs; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_LEASE_THREADS, + MinValue = MIN_LEASE_THREADS, + DefaultValue = DEFAULT_LEASE_THREADS) + private int numLeaseThreads; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, DefaultValue = 
DEFAULT_AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION) private boolean createRemoteFileSystemDuringInitialization; @@ -296,6 +306,8 @@ public AbfsConfiguration(final Configuration rawConfig, String accountName) field.setAccessible(true); if (field.isAnnotationPresent(IntegerConfigurationValidatorAnnotation.class)) { field.set(this, validateInt(field)); + } else if (field.isAnnotationPresent(IntegerWithOutlierConfigurationValidatorAnnotation.class)) { + field.set(this, validateIntWithOutlier(field)); } else if (field.isAnnotationPresent(LongConfigurationValidatorAnnotation.class)) { field.set(this, validateLong(field)); } else if (field.isAnnotationPresent(StringConfigurationValidatorAnnotation.class)) { @@ -634,6 +646,14 @@ public String getAppendBlobDirs() { return this.azureAppendBlobDirs; } + public String getAzureInfiniteLeaseDirs() { + return this.azureInfiniteLeaseDirs; + } + + public int getNumLeaseThreads() { + return this.numLeaseThreads; + } + public boolean getCreateRemoteFileSystemDuringInitialization() { // we do not support creating the filesystem when AuthType is SAS return this.createRemoteFileSystemDuringInitialization @@ -843,6 +863,21 @@ int validateInt(Field field) throws IllegalAccessException, InvalidConfiguration validator.ThrowIfInvalid()).validate(value); } + int validateIntWithOutlier(Field field) throws IllegalAccessException, InvalidConfigurationValueException { + IntegerWithOutlierConfigurationValidatorAnnotation validator = + field.getAnnotation(IntegerWithOutlierConfigurationValidatorAnnotation.class); + String value = get(validator.ConfigurationKey()); + + // validate + return new IntegerConfigurationBasicValidator( + validator.OutlierValue(), + validator.MinValue(), + validator.MaxValue(), + validator.DefaultValue(), + validator.ConfigurationKey(), + validator.ThrowIfInvalid()).validate(value); + } + long validateLong(Field field) throws IllegalAccessException, InvalidConfigurationValueException { LongConfigurationValidatorAnnotation validator = field.getAnnotation(LongConfigurationValidatorAnnotation.class); String value = rawConfig.get(validator.ConfigurationKey()); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index d8a2ed7bcd542..30108ed1e2fb3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -87,6 +87,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.functional.RemoteIterators; +import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; @@ -505,6 +506,26 @@ public FileStatus getFileStatus(final Path f) throws IOException { } } + /** + * Break the current lease on an ABFS file if it exists. A lease that is broken cannot be + * renewed. A new lease may be obtained on the file immediately. 
+ * + * @param f file name + * @throws IOException on any exception while breaking the lease + */ + public void breakLease(final Path f) throws IOException { + LOG.debug("AzureBlobFileSystem.breakLease path: {}", f); + + Path qualifiedPath = makeQualified(f); + + try (DurationInfo ignored = new DurationInfo(LOG, false, "Break lease for %s", + qualifiedPath)) { + abfsStore.breakLease(qualifiedPath); + } catch(AzureBlobFileSystemException ex) { + checkException(f, ex); + } + } + /** * Qualify a path to one which uses this FileSystem and, if relative, * made absolute. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index 75419c26dd214..fa7e12bc80e28 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -39,6 +39,7 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -48,10 +49,14 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.WeakHashMap; +import java.util.concurrent.ExecutionException; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,6 +105,7 @@ import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy; +import org.apache.hadoop.fs.azurebfs.services.AbfsLease; import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials; import org.apache.hadoop.fs.azurebfs.services.AbfsPerfTracker; import org.apache.hadoop.fs.azurebfs.services.AbfsPerfInfo; @@ -145,8 +151,11 @@ public class AzureBlobFileSystemStore implements Closeable, ListingSupport { private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1"; private static final int GET_SET_AGGREGATE_COUNT = 2; + private final Map leaseRefs; + private final AbfsConfiguration abfsConfiguration; private final Set azureAtomicRenameDirSet; + private Set azureInfiniteLeaseDirSet; private Trilean isNamespaceEnabled; private final AuthType authType; private final UserGroupInformation userGroupInformation; @@ -167,6 +176,8 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, final String fileSystemName = authorityParts[0]; final String accountName = authorityParts[1]; + leaseRefs = Collections.synchronizedMap(new WeakHashMap<>()); + try { this.abfsConfiguration = new AbfsConfiguration(configuration, accountName); } catch (IllegalAccessException exception) { @@ -195,6 +206,7 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, this.azureAtomicRenameDirSet = new HashSet<>(Arrays.asList( abfsConfiguration.getAzureAtomicRenameDirs().split(AbfsHttpConstants.COMMA))); + updateInfiniteLeaseDirs(); this.authType = abfsConfiguration.getAuthType(accountName); boolean usingOauth = 
(authType == AuthType.OAuth); boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? true : isSecureScheme; @@ -246,7 +258,24 @@ public String getPrimaryGroup() { @Override public void close() throws IOException { - IOUtils.cleanupWithLogger(LOG, client); + List> futures = new ArrayList<>(); + for (AbfsLease lease : leaseRefs.keySet()) { + if (lease == null) { + continue; + } + ListenableFuture future = client.submit(() -> lease.free()); + futures.add(future); + } + try { + Futures.allAsList(futures).get(); + } catch (InterruptedException e) { + LOG.error("Interrupted freeing leases", e); + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + LOG.error("Error freeing leases", e); + } finally { + IOUtils.cleanupWithLogger(LOG, client); + } } byte[] encodeAttribute(String value) throws UnsupportedEncodingException { @@ -496,12 +525,14 @@ public OutputStream createFile(final Path path, } perfInfo.registerResult(op.getResult()).registerSuccess(true); + AbfsLease lease = maybeCreateLease(relativePath); + return new AbfsOutputStream( client, statistics, relativePath, 0, - populateAbfsOutputStreamContext(isAppendBlob)); + populateAbfsOutputStreamContext(isAppendBlob, lease)); } } @@ -573,7 +604,8 @@ private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePa return op; } - private AbfsOutputStreamContext populateAbfsOutputStreamContext(boolean isAppendBlob) { + private AbfsOutputStreamContext populateAbfsOutputStreamContext(boolean isAppendBlob, + AbfsLease lease) { int bufferSize = abfsConfiguration.getWriteBufferSize(); if (isAppendBlob && bufferSize > FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE) { bufferSize = FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE; @@ -587,6 +619,7 @@ private AbfsOutputStreamContext populateAbfsOutputStreamContext(boolean isAppend .withAppendBlob(isAppendBlob) .withWriteMaxConcurrentRequestCount(abfsConfiguration.getWriteMaxConcurrentRequestCount()) .withMaxWriteRequestsToQueue(abfsConfiguration.getMaxWriteRequestsToQueue()) + .withLease(lease) .build(); } @@ -705,15 +738,29 @@ public OutputStream openFileForWrite(final Path path, final FileSystem.Statistic isAppendBlob = true; } + AbfsLease lease = maybeCreateLease(relativePath); + return new AbfsOutputStream( client, statistics, relativePath, offset, - populateAbfsOutputStreamContext(isAppendBlob)); + populateAbfsOutputStreamContext(isAppendBlob, lease)); } } + /** + * Break any current lease on an ABFS file. + * + * @param path file name + * @throws AzureBlobFileSystemException on any exception while breaking the lease + */ + public void breakLease(final Path path) throws AzureBlobFileSystemException { + LOG.debug("lease path: {}", path); + + client.breakLease(getRelativePath(path)); + } + public void rename(final Path source, final Path destination) throws AzureBlobFileSystemException { final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); @@ -1347,6 +1394,13 @@ public boolean isAtomicRenameKey(String key) { return isKeyForDirectorySet(key, azureAtomicRenameDirSet); } + public boolean isInfiniteLeaseKey(String key) { + if (azureInfiniteLeaseDirSet.isEmpty()) { + return false; + } + return isKeyForDirectorySet(key, azureInfiniteLeaseDirSet); + } + /** * A on-off operation to initialize AbfsClient for AzureBlobFileSystem * Operations. 
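The isInfiniteLeaseKey check above, together with the configuration keys this patch introduces, means a file is lease-protected only when its path falls under one of the prefixes in fs.azure.infinite-lease.directories, and fs.azure.lease.threads must be at least 1 for the lease operations to run. A hedged usage sketch follows, showing how a client might enable the feature and, if a writer dies without closing its stream, recover by breaking the lease; the account URI, directory prefixes and file name are placeholders, not values from the patch.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;

    public class InfiniteLeaseUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // comma-separated prefixes whose files are leased while open for write (placeholder paths)
        conf.set("fs.azure.infinite-lease.directories", "/single-writer,/locked");
        // at least one lease thread is required when infinite-lease directories are configured
        conf.setInt("fs.azure.lease.threads", 2);

        // placeholder container/account URI
        FileSystem fs = FileSystem.get(
            new URI("abfs://container@account.dfs.core.windows.net/"), conf);

        // Files created under /single-writer hold an infinite lease until their stream is
        // closed. If the writing process dies without closing, another client must break
        // the lease before it can write to the file again.
        ((AzureBlobFileSystem) fs).breakLease(new Path("/single-writer/output.txt"));
      }
    }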
@@ -1636,4 +1690,32 @@ void setNamespaceEnabled(Trilean isNamespaceEnabled){ this.isNamespaceEnabled = isNamespaceEnabled; } + private void updateInfiniteLeaseDirs() { + this.azureInfiniteLeaseDirSet = new HashSet<>(Arrays.asList( + abfsConfiguration.getAzureInfiniteLeaseDirs().split(AbfsHttpConstants.COMMA))); + // remove the empty string, since isKeyForDirectory returns true for empty strings + // and we don't want to default to enabling infinite lease dirs + this.azureInfiniteLeaseDirSet.remove(""); + } + + private AbfsLease maybeCreateLease(String relativePath) + throws AzureBlobFileSystemException { + boolean enableInfiniteLease = isInfiniteLeaseKey(relativePath); + if (!enableInfiniteLease) { + return null; + } + AbfsLease lease = new AbfsLease(client, relativePath); + leaseRefs.put(lease, null); + return lease; + } + + @VisibleForTesting + boolean areLeasesFreed() { + for (AbfsLease lease : leaseRefs.keySet()) { + if (lease != null && !lease.isFreed()) { + return false; + } + } + return true; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java index 184657e7d66ad..5cf7ec565b59e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java @@ -39,6 +39,11 @@ public final class AbfsHttpConstants { public static final String GET_ACCESS_CONTROL = "getAccessControl"; public static final String CHECK_ACCESS = "checkAccess"; public static final String GET_STATUS = "getStatus"; + public static final String ACQUIRE_LEASE_ACTION = "acquire"; + public static final String BREAK_LEASE_ACTION = "break"; + public static final String RELEASE_LEASE_ACTION = "release"; + public static final String RENEW_LEASE_ACTION = "renew"; + public static final String DEFAULT_LEASE_BREAK_PERIOD = "0"; public static final String DEFAULT_TIMEOUT = "90"; public static final String APPEND_BLOB_TYPE = "appendblob"; public static final String TOKEN_VERSION = "2"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index 02b143cd61bd3..4fe1d1c276db5 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -87,6 +87,15 @@ public final class ConfigurationKeys { /** Provides a config to provide comma separated path prefixes on which Appendblob based files are created * Default is empty. **/ public static final String FS_AZURE_APPEND_BLOB_KEY = "fs.azure.appendblob.directories"; + /** Provides a config to provide comma separated path prefixes which support infinite leases. + * Files under these paths will be leased when created or opened for writing and the lease will + * be released when the file is closed. The lease may be broken with the breakLease method on + * AzureBlobFileSystem. Default is empty. + * **/ + public static final String FS_AZURE_INFINITE_LEASE_KEY = "fs.azure.infinite-lease.directories"; + /** Provides a number of threads to use for lease operations for infinite lease directories. 
+ * Must be set to a minimum of 1 if infinite lease directories are to be used. Default is 0. **/ + public static final String FS_AZURE_LEASE_THREADS = "fs.azure.lease.threads"; public static final String FS_AZURE_READ_AHEAD_QUEUE_DEPTH = "fs.azure.readaheadqueue.depth"; public static final String FS_AZURE_ALWAYS_READ_BUFFER_SIZE = "fs.azure.read.alwaysReadBufferSize"; public static final String FS_AZURE_READ_AHEAD_BLOCK_SIZE = "fs.azure.read.readahead.blocksize"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index d90f525712af7..040b18ae4c281 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -78,6 +78,13 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE = true; public static final boolean DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE = true; public static final String DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES = ""; + public static final String DEFAULT_FS_AZURE_INFINITE_LEASE_DIRECTORIES = ""; + public static final int DEFAULT_LEASE_THREADS = 0; + public static final int MIN_LEASE_THREADS = 0; + public static final int DEFAULT_LEASE_DURATION = -1; + public static final int INFINITE_LEASE_DURATION = -1; + public static final int MIN_LEASE_DURATION = 15; + public static final int MAX_LEASE_DURATION = 60; public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java index 27ddcee695aaa..232553844fcf3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java @@ -60,6 +60,11 @@ public final class HttpHeaderConfigurations { public static final String X_MS_UMASK = "x-ms-umask"; public static final String X_MS_NAMESPACE_ENABLED = "x-ms-namespace-enabled"; public static final String X_MS_ABFS_CLIENT_LATENCY = "x-ms-abfs-client-latency"; + public static final String X_MS_LEASE_ACTION = "x-ms-lease-action"; + public static final String X_MS_LEASE_DURATION = "x-ms-lease-duration"; + public static final String X_MS_LEASE_ID = "x-ms-lease-id"; + public static final String X_MS_PROPOSED_LEASE_ID = "x-ms-proposed-lease-id"; + public static final String X_MS_LEASE_BREAK_PERIOD = "x-ms-lease-break-period"; private HttpHeaderConfigurations() {} } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java index 82c571a3b03b3..9fbe5a22cdf77 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java @@ -46,6 +46,22 @@ 
public class ConfigurationValidationAnnotations { boolean ThrowIfInvalid() default false; } + @Target({ ElementType.FIELD }) + @Retention(RetentionPolicy.RUNTIME) + public @interface IntegerWithOutlierConfigurationValidatorAnnotation { + String ConfigurationKey(); + + int MaxValue() default Integer.MAX_VALUE; + + int MinValue() default Integer.MIN_VALUE; + + int OutlierValue() default Integer.MIN_VALUE; + + int DefaultValue(); + + boolean ThrowIfInvalid() default false; + } + /** * Describes the requirements when validating the annotated long field. */ diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java index 9b1bead886e6c..d829c5ac6779c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java @@ -37,6 +37,10 @@ public AzureBlobFileSystemException(final String message, final Exception innerE super(message, innerException); } + public AzureBlobFileSystemException(final String message, final Throwable innerThrowable) { + super(message, innerThrowable); + } + @Override public String toString() { if (this.getMessage() == null && this.getCause() == null) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java index fb4d29f87949a..7369bfaf56422 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java @@ -33,17 +33,20 @@ public enum Mode { private final int length; private final Mode mode; private final boolean isAppendBlob; + private final String leaseId; public AppendRequestParameters(final long position, final int offset, final int length, final Mode mode, - final boolean isAppendBlob) { + final boolean isAppendBlob, + final String leaseId) { this.position = position; this.offset = offset; this.length = length; this.mode = mode; this.isAppendBlob = isAppendBlob; + this.leaseId = leaseId; } public long getPosition() { @@ -66,4 +69,7 @@ public boolean isAppendBlob() { return this.isAppendBlob; } + public String getLeaseId() { + return this.leaseId; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java index 26c7d2f0ac19c..9d4beb74bbe3f 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java @@ -31,11 +31,18 @@ public class IntegerConfigurationBasicValidator extends ConfigurationBasicValidator implements ConfigurationValidator { private final int min; private final int max; + private final int outlier; public IntegerConfigurationBasicValidator(final int min, final int max, final int 
defaultVal, final String configKey, final boolean throwIfInvalid) { + this(min, min, max, defaultVal, configKey, throwIfInvalid); + } + + public IntegerConfigurationBasicValidator(final int outlier, final int min, final int max, + final int defaultVal, final String configKey, final boolean throwIfInvalid) { super(configKey, defaultVal, throwIfInvalid); this.min = min; this.max = max; + this.outlier = outlier; } public Integer validate(final String configValue) throws InvalidConfigurationValueException { @@ -47,10 +54,14 @@ public Integer validate(final String configValue) throws InvalidConfigurationVal try { result = Integer.parseInt(configValue); // throw an exception if a 'within bounds' value is missing - if (getThrowIfInvalid() && (result < this.min || result > this.max)) { + if (getThrowIfInvalid() && (result != outlier) && (result < this.min || result > this.max)) { throw new InvalidConfigurationValueException(getConfigKey()); } + if (result == outlier) { + return result; + } + // set the value to the nearest bound if it's out of bounds if (result < this.min) { return this.min; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index 92b24f0dda212..7c8a2112bfa46 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -29,16 +29,27 @@ import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; -import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; -import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; -import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; -import org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableScheduledFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningScheduledExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; +import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.SASTokenProviderException; @@ -49,6 +60,8 @@ import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; import org.apache.hadoop.fs.azurebfs.utils.DateTimeUtils; import org.apache.hadoop.io.IOUtils; 
+import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; +import org.apache.hadoop.util.concurrent.HadoopExecutors; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.*; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_DELETE_CONSIDERED_IDEMPOTENT; @@ -76,6 +89,8 @@ public class AbfsClient implements Closeable { private SASTokenProvider sasTokenProvider; private final AbfsCounters abfsCounters; + private final ListeningScheduledExecutorService executorService; + private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, final AbfsClientContext abfsClientContext) { @@ -106,6 +121,11 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCreden this.userAgent = initializeUserAgent(abfsConfiguration, sslProviderName); this.abfsPerfTracker = abfsClientContext.getAbfsPerfTracker(); this.abfsCounters = abfsClientContext.getAbfsCounters(); + + ThreadFactory tf = + new ThreadFactoryBuilder().setNameFormat("AbfsClient Lease Ops").setDaemon(true).build(); + this.executorService = MoreExecutors.listeningDecorator( + HadoopExecutors.newScheduledThreadPool(this.abfsConfiguration.getNumLeaseThreads(), tf)); } public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, @@ -129,6 +149,7 @@ public void close() throws IOException { if (tokenProvider instanceof Closeable) { IOUtils.cleanupWithLogger(LOG, (Closeable) tokenProvider); } + HadoopExecutors.shutdown(executorService, LOG, 0, TimeUnit.SECONDS); } public String getFileSystem() { @@ -317,6 +338,83 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin return op; } + public AbfsRestOperation acquireLease(final String path, int duration) throws AzureBlobFileSystemException { + final List requestHeaders = createDefaultHeaders(); + + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, ACQUIRE_LEASE_ACTION)); + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_DURATION, Integer.toString(duration))); + requestHeaders.add(new AbfsHttpHeader(X_MS_PROPOSED_LEASE_ID, UUID.randomUUID().toString())); + + final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); + + final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); + final AbfsRestOperation op = new AbfsRestOperation( + AbfsRestOperationType.LeasePath, + this, + HTTP_METHOD_POST, + url, + requestHeaders); + op.execute(); + return op; + } + + public AbfsRestOperation renewLease(final String path, final String leaseId) throws AzureBlobFileSystemException { + final List requestHeaders = createDefaultHeaders(); + + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, RENEW_LEASE_ACTION)); + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ID, leaseId)); + + final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); + + final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); + final AbfsRestOperation op = new AbfsRestOperation( + AbfsRestOperationType.LeasePath, + this, + HTTP_METHOD_POST, + url, + requestHeaders); + op.execute(); + return op; + } + + public AbfsRestOperation releaseLease(final String path, final String leaseId) throws AzureBlobFileSystemException { + final List requestHeaders = createDefaultHeaders(); + + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, RELEASE_LEASE_ACTION)); + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ID, leaseId)); + + final AbfsUriQueryBuilder abfsUriQueryBuilder = 
createDefaultUriQueryBuilder(); + + final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); + final AbfsRestOperation op = new AbfsRestOperation( + AbfsRestOperationType.LeasePath, + this, + HTTP_METHOD_POST, + url, + requestHeaders); + op.execute(); + return op; + } + + public AbfsRestOperation breakLease(final String path) throws AzureBlobFileSystemException { + final List requestHeaders = createDefaultHeaders(); + + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ACTION, BREAK_LEASE_ACTION)); + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_BREAK_PERIOD, DEFAULT_LEASE_BREAK_PERIOD)); + + final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); + + final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); + final AbfsRestOperation op = new AbfsRestOperation( + AbfsRestOperationType.LeasePath, + this, + HTTP_METHOD_POST, + url, + requestHeaders); + op.execute(); + return op; + } + public AbfsRestOperation renamePath(String source, final String destination, final String continuation) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); @@ -416,6 +514,9 @@ public AbfsRestOperation append(final String path, final byte[] buffer, // PUT and specify the real method in the X-Http-Method-Override header. requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, HTTP_METHOD_PATCH)); + if (reqParams.getLeaseId() != null) { + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ID, reqParams.getLeaseId())); + } final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, APPEND_ACTION); @@ -492,13 +593,16 @@ public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, } public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData, - boolean isClose, final String cachedSasToken) + boolean isClose, final String cachedSasToken, final String leaseId) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. 
requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, HTTP_METHOD_PATCH)); + if (leaseId != null) { + requestHeaders.add(new AbfsHttpHeader(X_MS_LEASE_ID, leaseId)); + } final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, FLUSH_ACTION); @@ -1003,4 +1107,21 @@ public SASTokenProvider getSasTokenProvider() { protected AbfsCounters getAbfsCounters() { return abfsCounters; } + + public int getNumLeaseThreads() { + return abfsConfiguration.getNumLeaseThreads(); + } + + public ListenableScheduledFuture schedule(Callable callable, long delay, + TimeUnit timeUnit) { + return executorService.schedule(callable, delay, timeUnit); + } + + public ListenableFuture submit(Runnable runnable) { + return executorService.submit(runnable); + } + + public void addCallback(ListenableFuture future, FutureCallback callback) { + Futures.addCallback(future, callback, executorService); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java new file mode 100644 index 0000000000000..e15795efee68d --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsErrors.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.services; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_LEASE_THREADS; + +/** + * ABFS error constants. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class AbfsErrors { + public static final String ERR_WRITE_WITHOUT_LEASE = "Attempted to write to file without lease"; + public static final String ERR_LEASE_EXPIRED = "A lease ID was specified, but the lease for the" + + " resource has expired"; + public static final String ERR_NO_LEASE_ID_SPECIFIED = "There is currently a lease on the " + + "resource and no lease ID was specified in the request"; + public static final String ERR_PARALLEL_ACCESS_DETECTED = "Parallel access to the create path " + + "detected. 
Failing request to honor single writer semantics"; + public static final String ERR_ACQUIRING_LEASE = "Unable to acquire lease"; + public static final String ERR_LEASE_ALREADY_PRESENT = "There is already a lease present"; + public static final String ERR_LEASE_NOT_PRESENT = "There is currently no lease on the resource"; + public static final String ERR_LEASE_ID_NOT_PRESENT = "The lease ID is not present with the " + + "specified lease operation"; + public static final String ERR_LEASE_DID_NOT_MATCH = "The lease ID specified did not match the " + + "lease ID for the resource with the specified lease operation"; + public static final String ERR_LEASE_BROKEN = "The lease ID matched, but the lease has been " + + "broken explicitly and cannot be renewed"; + public static final String ERR_LEASE_FUTURE_EXISTS = "There is already an existing lease " + + "operation"; + public static final String ERR_NO_LEASE_THREADS = "Lease desired but no lease threads " + + "configured, set " + FS_AZURE_LEASE_THREADS; + + private AbfsErrors() {} +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java new file mode 100644 index 0000000000000..97a8b0228a5b3 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLease.java @@ -0,0 +1,188 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableScheduledFuture; +import org.apache.hadoop.thirdparty.org.checkerframework.checker.nullness.qual.Nullable; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.INFINITE_LEASE_DURATION; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_ACQUIRING_LEASE; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_LEASE_FUTURE_EXISTS; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_NO_LEASE_THREADS; + +/** + * AbfsLease manages an Azure blob lease. 
It acquires an infinite lease on instantiation and + * releases the lease when free() is called. Use it to prevent writes to the blob by other + * processes that don't have the lease. + * + * Creating a new Lease object blocks the caller until the Azure blob lease is acquired. It will + * retry a fixed number of times before failing if there is a problem acquiring the lease. + * + * Call free() to release the Lease. If the holder process dies, AzureBlobFileSystem breakLease + * will need to be called before another client will be able to write to the file. + */ +public final class AbfsLease { + private static final Logger LOG = LoggerFactory.getLogger(AbfsLease.class); + + // Number of retries for acquiring lease + static final int DEFAULT_LEASE_ACQUIRE_MAX_RETRIES = 7; + // Retry interval for acquiring lease in secs + static final int DEFAULT_LEASE_ACQUIRE_RETRY_INTERVAL = 10; + + private final AbfsClient client; + private final String path; + + // Lease status variables + private volatile boolean leaseFreed; + private volatile String leaseID = null; + private volatile Throwable exception = null; + private volatile int acquireRetryCount = 0; + private volatile ListenableScheduledFuture future = null; + + public static class LeaseException extends AzureBlobFileSystemException { + public LeaseException(Throwable t) { + super(ERR_ACQUIRING_LEASE + ": " + t, t); + } + + public LeaseException(String s) { + super(s); + } + } + + public AbfsLease(AbfsClient client, String path) throws AzureBlobFileSystemException { + this(client, path, DEFAULT_LEASE_ACQUIRE_MAX_RETRIES, DEFAULT_LEASE_ACQUIRE_RETRY_INTERVAL); + } + + @VisibleForTesting + public AbfsLease(AbfsClient client, String path, int acquireMaxRetries, + int acquireRetryInterval) throws AzureBlobFileSystemException { + this.leaseFreed = false; + this.client = client; + this.path = path; + + if (client.getNumLeaseThreads() < 1) { + throw new LeaseException(ERR_NO_LEASE_THREADS); + } + + // Try to get the lease a specified number of times, else throw an error + RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( + acquireMaxRetries, acquireRetryInterval, TimeUnit.SECONDS); + acquireLease(retryPolicy, 0, acquireRetryInterval, 0); + + while (leaseID == null && exception == null) { + try { + future.get(); + } catch (Exception e) { + LOG.debug("Got exception waiting for acquire lease future. 
Checking if lease ID or " + + "exception have been set", e); + } + } + if (exception != null) { + LOG.error("Failed to acquire lease on {}", path); + throw new LeaseException(exception); + } + + LOG.debug("Acquired lease {} on {}", leaseID, path); + } + + private void acquireLease(RetryPolicy retryPolicy, int numRetries, int retryInterval, long delay) + throws LeaseException { + LOG.debug("Attempting to acquire lease on {}, retry {}", path, numRetries); + if (future != null && !future.isDone()) { + throw new LeaseException(ERR_LEASE_FUTURE_EXISTS); + } + future = client.schedule(() -> client.acquireLease(path, INFINITE_LEASE_DURATION), + delay, TimeUnit.SECONDS); + client.addCallback(future, new FutureCallback() { + @Override + public void onSuccess(@Nullable AbfsRestOperation op) { + leaseID = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_LEASE_ID); + LOG.debug("Acquired lease {} on {}", leaseID, path); + } + + @Override + public void onFailure(Throwable throwable) { + try { + if (RetryPolicy.RetryAction.RetryDecision.RETRY + == retryPolicy.shouldRetry(null, numRetries, 0, true).action) { + LOG.debug("Failed to acquire lease on {}, retrying: {}", path, throwable); + acquireRetryCount++; + acquireLease(retryPolicy, numRetries + 1, retryInterval, retryInterval); + } else { + exception = throwable; + } + } catch (Exception e) { + exception = throwable; + } + } + }); + } + + /** + * Cancel future and free the lease. If an exception occurs while releasing the lease, the error + * will be logged. If the lease cannot be released, AzureBlobFileSystem breakLease will need to + * be called before another client will be able to write to the file. + */ + public void free() { + if (leaseFreed) { + return; + } + try { + LOG.debug("Freeing lease: path {}, lease id {}", path, leaseID); + if (future != null && !future.isDone()) { + future.cancel(true); + } + client.releaseLease(path, leaseID); + } catch (IOException e) { + LOG.warn("Exception when trying to release lease {} on {}. Lease will need to be broken: {}", + leaseID, path, e.getMessage()); + } finally { + // Even if releasing the lease fails (e.g. 
because the file was deleted), + // make sure to record that we freed the lease + leaseFreed = true; + LOG.debug("Freed lease {} on {}", leaseID, path); + } + } + + public boolean isFreed() { + return leaseFreed; + } + + public String getLeaseID() { + return leaseID; + } + + @VisibleForTesting + public int getAcquireRetryCount() { + return acquireRetryCount; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 2d02019ab11c6..80b35ee4d3a91 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -37,6 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; @@ -53,6 +54,7 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_WRITE_WITHOUT_LEASE; import static org.apache.hadoop.fs.impl.StoreImplementationUtils.isProbeForSyncable; import static org.apache.hadoop.io.IOUtils.wrapException; import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE; @@ -92,6 +94,9 @@ public class AbfsOutputStream extends OutputStream implements Syncable, // SAS tokens can be re-used until they expire private CachedSASToken cachedSasToken; + private AbfsLease lease; + private String leaseId; + /** * Queue storing buffers with the size of the Azure block ready for * reuse. 
The pool allows reusing the blocks instead of allocating new @@ -142,6 +147,10 @@ public AbfsOutputStream( } this.maxRequestsThatCanBeQueued = abfsOutputStreamContext .getMaxWriteRequestsToQueue(); + + this.lease = abfsOutputStreamContext.getLease(); + this.leaseId = abfsOutputStreamContext.getLeaseId(); + this.threadExecutor = new ThreadPoolExecutor(maxConcurrentRequestCount, maxConcurrentRequestCount, @@ -203,6 +212,10 @@ public synchronized void write(final byte[] data, final int off, final int lengt throw new IndexOutOfBoundsException(); } + if (hasLease() && isLeaseFreed()) { + throw new PathIOException(path, ERR_WRITE_WITHOUT_LEASE); + } + int currentOffset = off; int writableBytes = bufferSize - bufferIndex; int numberOfBytesToWrite = length; @@ -306,6 +319,10 @@ public synchronized void close() throws IOException { // See HADOOP-16785 throw wrapException(path, e.getMessage(), e); } finally { + if (hasLease()) { + lease.free(); + lease = null; + } lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED); buffer = null; bufferIndex = 0; @@ -372,7 +389,7 @@ private void writeAppendBlobCurrentBufferToService() throws IOException { try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "writeCurrentBufferToService", "append")) { AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0, - bytesLength, APPEND_MODE, true); + bytesLength, APPEND_MODE, true, leaseId); AbfsRestOperation op = client.append(path, bytes, reqParams, cachedSasToken.get()); cachedSasToken.update(op.getSasToken()); if (outputStreamStatistics != null) { @@ -448,7 +465,7 @@ private synchronized void writeCurrentBufferToService(boolean isFlush, boolean i mode = FLUSH_MODE; } AppendRequestParameters reqParams = new AppendRequestParameters( - offset, 0, bytesLength, mode, false); + offset, 0, bytesLength, mode, false, leaseId); AbfsRestOperation op = client.append(path, bytes, reqParams, cachedSasToken.get()); cachedSasToken.update(op.getSasToken()); @@ -517,7 +534,8 @@ private synchronized void flushWrittenBytesToServiceInternal(final long offset, AbfsPerfTracker tracker = client.getAbfsPerfTracker(); try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "flushWrittenBytesToServiceInternal", "flush")) { - AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose, cachedSasToken.get()); + AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose, + cachedSasToken.get(), leaseId); cachedSasToken.update(op.getSasToken()); perfInfo.registerResult(op.getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { @@ -637,6 +655,19 @@ public IOStatistics getIOStatistics() { return ioStatistics; } + @VisibleForTesting + public boolean isLeaseFreed() { + if (lease == null) { + return true; + } + return lease.isFreed(); + } + + @VisibleForTesting + public boolean hasLease() { + return lease != null; + } + /** * Appending AbfsOutputStream statistics to base toString(). 
* diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java index 925cd4f7b5646..48f6f54081053 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java @@ -39,6 +39,8 @@ public class AbfsOutputStreamContext extends AbfsStreamContext { private int maxWriteRequestsToQueue; + private AbfsLease lease; + public AbfsOutputStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) { super(sasTokenRenewPeriodForStreamsInSeconds); } @@ -94,6 +96,11 @@ public AbfsOutputStreamContext withMaxWriteRequestsToQueue( return this; } + public AbfsOutputStreamContext withLease(final AbfsLease lease) { + this.lease = lease; + return this; + } + public int getWriteBufferSize() { return writeBufferSize; } @@ -125,4 +132,15 @@ public int getMaxWriteRequestsToQueue() { public boolean isEnableSmallWriteOptimization() { return this.enableSmallWriteOptimization; } + + public AbfsLease getLease() { + return this.lease; + } + + public String getLeaseId() { + if (this.lease == null) { + return null; + } + return this.lease.getLeaseID(); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 584b71f1ee5bf..b046cbc03a30b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -131,6 +131,7 @@ String getSasToken() { this.url = url; this.requestHeaders = requestHeaders; this.hasRequestBody = (AbfsHttpConstants.HTTP_METHOD_PUT.equals(method) + || AbfsHttpConstants.HTTP_METHOD_POST.equals(method) || AbfsHttpConstants.HTTP_METHOD_PATCH.equals(method)); this.sasToken = sasToken; this.abfsCounters = client.getAbfsCounters(); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java index d3031860dd1c2..830297f381b91 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperationType.java @@ -40,5 +40,6 @@ public enum AbfsRestOperationType { Flush, ReadFile, DeletePath, - CheckAccess + CheckAccess, + LeasePath, } diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md index 33d4a0fa428a0..6be5952b03aa6 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md @@ -887,6 +887,22 @@ enabled for your Azure Storage account." The directories can be specified as comma separated values. By default the value is "/hbase" +### Infinite Lease Options +`fs.azure.infinite-lease.directories`: Directories for infinite lease support +can be specified comma separated in this config. By default, multiple +clients will be able to write to the same file simultaneously. 
When writing +to files contained within the directories specified in this config, the +client will obtain a lease on the file that will prevent any other clients +from writing to the file. When the output stream is closed, the lease will be +released. To revoke a client's write access for a file, the +AzureBlobFilesystem breakLease method may be called. If the client dies +before the file can be closed and the lease released, breakLease will need to +be called before another client will be able to write to the file. + +`fs.azure.lease.threads`: This is the size of the thread pool that will be +used for lease operations for infinite lease directories. By default the value +is 0, so it must be set to at least 1 to support infinite lease directories. + ### Perf Options #### 1. HTTP Request Tracking Options diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java new file mode 100644 index 0000000000000..9857da8957e22 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java @@ -0,0 +1,336 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.concurrent.RejectedExecutionException; + +import org.junit.Assert; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsLease; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; + +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_INFINITE_LEASE_KEY; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_LEASE_THREADS; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_ACQUIRING_LEASE; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_LEASE_EXPIRED; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_LEASE_NOT_PRESENT; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_NO_LEASE_ID_SPECIFIED; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_NO_LEASE_THREADS; +import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_PARALLEL_ACCESS_DETECTED; + +/** + * Test lease operations. + */ +public class ITestAzureBlobFileSystemLease extends AbstractAbfsIntegrationTest { + private static final int TEST_EXECUTION_TIMEOUT = 30 * 1000; + private static final int LONG_TEST_EXECUTION_TIMEOUT = 90 * 1000; + private static final String TEST_FILE = "testfile"; + private final boolean isHNSEnabled; + + public ITestAzureBlobFileSystemLease() throws Exception { + super(); + + this.isHNSEnabled = getConfiguration() + .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); + } + + private AzureBlobFileSystem getCustomFileSystem(Path infiniteLeaseDirs, int numLeaseThreads) throws Exception { + Configuration conf = getRawConfiguration(); + conf.setBoolean(String.format("fs.%s.impl.disable.cache", getAbfsScheme()), true); + conf.set(FS_AZURE_INFINITE_LEASE_KEY, infiniteLeaseDirs.toUri().getPath()); + conf.setInt(FS_AZURE_LEASE_THREADS, numLeaseThreads); + return getFileSystem(conf); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testNoInfiniteLease() throws IOException { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getFileSystem(); + fs.mkdirs(testFilePath.getParent()); + try (FSDataOutputStream out = fs.create(testFilePath)) { + Assert.assertFalse("Output stream should not have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + } + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testNoLeaseThreads() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 0); + fs.mkdirs(testFilePath.getParent()); + 
LambdaTestUtils.intercept(IOException.class, ERR_NO_LEASE_THREADS, () -> { + try (FSDataOutputStream out = fs.create(testFilePath)) { + } + return "No failure when lease requested with 0 lease threads"; + }); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testOneWriter() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + FSDataOutputStream out = fs.create(testFilePath); + Assert.assertTrue("Output stream should have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + out.close(); + Assert.assertFalse("Output stream should not have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testSubDir() throws Exception { + final Path testFilePath = new Path(new Path(path(methodName.getMethodName()), "subdir"), + TEST_FILE); + final AzureBlobFileSystem fs = + getCustomFileSystem(testFilePath.getParent().getParent(), 1); + fs.mkdirs(testFilePath.getParent().getParent()); + + FSDataOutputStream out = fs.create(testFilePath); + Assert.assertTrue("Output stream should have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + out.close(); + Assert.assertFalse("Output stream should not have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testTwoCreate() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + try (FSDataOutputStream out = fs.create(testFilePath)) { + LambdaTestUtils.intercept(IOException.class, isHNSEnabled ? 
ERR_PARALLEL_ACCESS_DETECTED + : ERR_NO_LEASE_ID_SPECIFIED, () -> { + try (FSDataOutputStream out2 = fs.create(testFilePath)) { + } + return "Expected second create on infinite lease dir to fail"; + }); + } + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + private void twoWriters(AzureBlobFileSystem fs, Path testFilePath, boolean expectException) throws Exception { + try (FSDataOutputStream out = fs.create(testFilePath)) { + try (FSDataOutputStream out2 = fs.append(testFilePath)) { + out2.writeInt(2); + out2.hsync(); + } catch (IOException e) { + if (expectException) { + GenericTestUtils.assertExceptionContains(ERR_ACQUIRING_LEASE, e); + } else { + throw e; + } + } + out.writeInt(1); + out.hsync(); + } + + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testTwoWritersCreateAppendNoInfiniteLease() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getFileSystem(); + fs.mkdirs(testFilePath.getParent()); + + twoWriters(fs, testFilePath, false); + } + + @Test(timeout = LONG_TEST_EXECUTION_TIMEOUT) + public void testTwoWritersCreateAppendWithInfiniteLeaseEnabled() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + twoWriters(fs, testFilePath, true); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testLeaseFreedOnClose() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + FSDataOutputStream out; + out = fs.create(testFilePath); + out.write(0); + Assert.assertTrue("Output stream should have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + out.close(); + Assert.assertFalse("Output stream should not have lease after close", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testWriteAfterBreakLease() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + FSDataOutputStream out; + out = fs.create(testFilePath); + out.write(0); + out.hsync(); + + fs.breakLease(testFilePath); + + LambdaTestUtils.intercept(IOException.class, ERR_LEASE_EXPIRED, () -> { + out.write(1); + out.hsync(); + return "Expected exception on write after lease break but got " + out; + }); + + LambdaTestUtils.intercept(IOException.class, ERR_LEASE_EXPIRED, () -> { + out.close(); + return "Expected exception on close after lease break but got " + out; + }); + + Assert.assertTrue("Output stream lease should be freed", + ((AbfsOutputStream) out.getWrappedStream()).isLeaseFreed()); + + try (FSDataOutputStream out2 = fs.append(testFilePath)) { + out2.write(2); + out2.hsync(); + } + + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = LONG_TEST_EXECUTION_TIMEOUT) + public void testLeaseFreedAfterBreak() throws Exception { + final Path testFilePath = 
new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + FSDataOutputStream out = fs.create(testFilePath); + out.write(0); + + fs.breakLease(testFilePath); + + LambdaTestUtils.intercept(IOException.class, ERR_LEASE_EXPIRED, () -> { + out.close(); + return "Expected exception on close after lease break but got " + out; + }); + + Assert.assertTrue("Output stream lease should be freed", + ((AbfsOutputStream) out.getWrappedStream()).isLeaseFreed()); + + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testInfiniteLease() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + try (FSDataOutputStream out = fs.create(testFilePath)) { + Assert.assertTrue("Output stream should have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + out.write(0); + } + Assert.assertTrue(fs.getAbfsStore().areLeasesFreed()); + + try (FSDataOutputStream out = fs.append(testFilePath)) { + Assert.assertTrue("Output stream should have lease", + ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + out.write(1); + } + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testFileSystemClose() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + + FSDataOutputStream out = fs.create(testFilePath); + out.write(0); + Assert.assertFalse("Store leases should exist", fs.getAbfsStore().areLeasesFreed()); + fs.close(); + Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + + LambdaTestUtils.intercept(IOException.class, isHNSEnabled ? 
ERR_LEASE_NOT_PRESENT + : ERR_LEASE_EXPIRED, () -> { + out.close(); + return "Expected exception on close after closed FS but got " + out; + }); + + LambdaTestUtils.intercept(RejectedExecutionException.class, () -> { + try (FSDataOutputStream out2 = fs.append(testFilePath)) { + } + return "Expected exception on new append after closed FS"; + }); + } + + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testAcquireRetry() throws Exception { + final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); + final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); + fs.mkdirs(testFilePath.getParent()); + fs.createNewFile(testFilePath); + + AbfsLease lease = new AbfsLease(fs.getAbfsClient(), testFilePath.toUri().getPath()); + Assert.assertNotNull("Did not successfully lease file", lease.getLeaseID()); + lease.free(); + Assert.assertEquals("Unexpected acquire retry count", 0, lease.getAcquireRetryCount()); + + AbfsClient mockClient = spy(fs.getAbfsClient()); + + doThrow(new AbfsLease.LeaseException("failed to acquire 1")) + .doThrow(new AbfsLease.LeaseException("failed to acquire 2")) + .doCallRealMethod() + .when(mockClient).acquireLease(anyString(), anyInt()); + + lease = new AbfsLease(mockClient, testFilePath.toUri().getPath(), 5, 1); + Assert.assertNotNull("Acquire lease should have retried", lease.getLeaseID()); + lease.free(); + Assert.assertEquals("Unexpected acquire retry count", 2, lease.getAcquireRetryCount()); + + doThrow(new AbfsLease.LeaseException("failed to acquire")) + .when(mockClient).acquireLease(anyString(), anyInt()); + + LambdaTestUtils.intercept(AzureBlobFileSystemException.class, () -> { + new AbfsLease(mockClient, testFilePath.toUri().getPath(), 5, 1); + }); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/diagnostics/TestConfigurationValidators.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/diagnostics/TestConfigurationValidators.java index f02eadc9a0491..6a02435fc6e5e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/diagnostics/TestConfigurationValidators.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/diagnostics/TestConfigurationValidators.java @@ -24,11 +24,14 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException; import org.apache.hadoop.fs.azurebfs.utils.Base64; -import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE; -import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MAX_BUFFER_SIZE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_LEASE_DURATION; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_READ_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_WRITE_BUFFER_SIZE; - +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.INFINITE_LEASE_DURATION; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MAX_BUFFER_SIZE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MAX_LEASE_DURATION; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_LEASE_DURATION; /** * Test configuration validators. 
@@ -58,6 +61,26 @@ public void testIntegerConfigValidatorThrowsIfMissingValidValue() throws Excepti integerConfigurationValidator.validate("3072"); } + @Test + public void testIntegerWithOutlierConfigValidator() throws Exception { + IntegerConfigurationBasicValidator integerConfigurationValidator = new IntegerConfigurationBasicValidator( + INFINITE_LEASE_DURATION, MIN_LEASE_DURATION, MAX_LEASE_DURATION, DEFAULT_LEASE_DURATION, FAKE_KEY, + false); + + assertEquals(INFINITE_LEASE_DURATION, (int) integerConfigurationValidator.validate("-1")); + assertEquals(DEFAULT_LEASE_DURATION, (int) integerConfigurationValidator.validate(null)); + assertEquals(MIN_LEASE_DURATION, (int) integerConfigurationValidator.validate("15")); + assertEquals(MAX_LEASE_DURATION, (int) integerConfigurationValidator.validate("60")); + } + + @Test(expected = InvalidConfigurationValueException.class) + public void testIntegerWithOutlierConfigValidatorThrowsIfMissingValidValue() throws Exception { + IntegerConfigurationBasicValidator integerConfigurationValidator = new IntegerConfigurationBasicValidator( + INFINITE_LEASE_DURATION, MIN_LEASE_DURATION, MAX_LEASE_DURATION, DEFAULT_LEASE_DURATION, FAKE_KEY, + true); + integerConfigurationValidator.validate("14"); + } + @Test public void testLongConfigValidator() throws Exception { LongConfigurationBasicValidator longConfigurationValidator = new LongConfigurationBasicValidator( diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java index 1e6b8efe6d9d2..f4243bc7e287b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.ArgumentMatchers.refEq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -86,7 +87,7 @@ public void verifyShortWriteRequest() throws Exception { AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); @@ -104,9 +105,9 @@ public void verifyShortWriteRequest() throws Exception { out.hsync(); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, WRITE_SIZE, APPEND_MODE, false); + 0, 0, WRITE_SIZE, APPEND_MODE, false, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - WRITE_SIZE, 0, 2 * WRITE_SIZE, APPEND_MODE, false); + WRITE_SIZE, 0, 2 * WRITE_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); @@ -133,7 +134,7 @@ public void verifyWriteRequest() throws Exception { when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), 
any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); @@ -146,9 +147,9 @@ public void verifyWriteRequest() throws Exception { out.close(); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, BUFFER_SIZE, APPEND_MODE, false); + 0, 0, BUFFER_SIZE, APPEND_MODE, false, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - BUFFER_SIZE, 0, 5*WRITE_SIZE-BUFFER_SIZE, APPEND_MODE, false); + BUFFER_SIZE, 0, 5*WRITE_SIZE-BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); @@ -165,7 +166,7 @@ public void verifyWriteRequest() throws Exception { ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), - acFlushSASToken.capture()); + acFlushSASToken.capture(), isNull()); assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); assertThat(Arrays.asList(Long.valueOf(5*WRITE_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); @@ -189,7 +190,7 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); when(op.getSasToken()).thenReturn("testToken"); when(op.getResult()).thenReturn(httpOp); @@ -204,9 +205,9 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { out.close(); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, BUFFER_SIZE, APPEND_MODE, false); + 0, 0, BUFFER_SIZE, APPEND_MODE, false, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); @@ -223,7 +224,7 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), - acFlushSASToken.capture()); + acFlushSASToken.capture(), isNull()); assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); @@ -247,7 +248,7 @@ public void 
verifyWriteRequestOfBufferSize() throws Exception { when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); when(op.getSasToken()).thenReturn("testToken"); when(op.getResult()).thenReturn(httpOp); @@ -262,9 +263,9 @@ public void verifyWriteRequestOfBufferSize() throws Exception { Thread.sleep(1000); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, BUFFER_SIZE, APPEND_MODE, false); + 0, 0, BUFFER_SIZE, APPEND_MODE, false, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); @@ -291,7 +292,7 @@ public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception { when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, true)); @@ -304,9 +305,9 @@ public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception { Thread.sleep(1000); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, BUFFER_SIZE, APPEND_MODE, true); + 0, 0, BUFFER_SIZE, APPEND_MODE, true, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, true); + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, true, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); @@ -334,7 +335,7 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); @@ -347,9 +348,9 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { out.hflush(); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, BUFFER_SIZE, APPEND_MODE, false); + 0, 0, BUFFER_SIZE, APPEND_MODE, false, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); @@ -366,7 +367,7 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); verify(client, 
times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), - acFlushSASToken.capture()); + acFlushSASToken.capture(), isNull()); assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); @@ -388,7 +389,7 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception { AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); when(client.getAbfsPerfTracker()).thenReturn(tracker); when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); - when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op); AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); @@ -403,9 +404,9 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception { Thread.sleep(1000); AppendRequestParameters firstReqParameters = new AppendRequestParameters( - 0, 0, BUFFER_SIZE, APPEND_MODE, false); + 0, 0, BUFFER_SIZE, APPEND_MODE, false, null); AppendRequestParameters secondReqParameters = new AppendRequestParameters( - BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null); verify(client, times(1)).append( eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); From 4e44bfcc33d30f45e9e166eef62f218057048dc9 Mon Sep 17 00:00:00 2001 From: Inigo Goiri Date: Mon, 12 Apr 2021 20:44:31 -0700 Subject: [PATCH 227/666] Revert "HDFS-15423 RBF: WebHDFS create shouldn't choose DN from all sub-clusters (#2605)" (#2900) This reverts commit cb3ed32fe0d937980999d590deb35ad97d40f9e6. 
--- .../router/RouterWebHdfsMethods.java | 47 +----- .../router/web/RouterWebHDFSContract.java | 2 - .../federation/MiniRouterDFSCluster.java | 19 ++- .../router/TestRouterWebHdfsMethods.java | 147 ------------------ 4 files changed, 17 insertions(+), 198 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index afc4a3d8fac21..f6ac70c368a52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -93,7 +93,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,8 +104,6 @@ import java.util.HashSet; import java.util.List; import java.util.Random; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * WebHDFS Router implementation. This is an extension of @@ -456,33 +453,21 @@ private DatanodeInfo chooseDatanode(final Router router, final String path, final HttpOpParam.Op op, final long openOffset, final String excludeDatanodes) throws IOException { final RouterRpcServer rpcServer = getRPCServer(router); - DatanodeInfo[] dns = {}; - String resolvedNs = ""; + DatanodeInfo[] dns = null; try { dns = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); } catch (IOException e) { LOG.error("Cannot get the datanodes from the RPC server", e); } - if (op == PutOpParam.Op.CREATE) { - try { - resolvedNs = rpcServer.getCreateLocation(path).getNameserviceId(); - } catch (IOException e) { - LOG.error("Cannot get the name service " + - "to create file for path {} ", path, e); - } - } - HashSet excludes = new HashSet(); - Collection collection = - getTrimmedStringCollection(excludeDatanodes); - for (DatanodeInfo dn : dns) { - String ns = getNsFromDataNodeNetworkLocation(dn.getNetworkLocation()); - if (collection.contains(dn.getName())) { - excludes.add(dn); - } else if (op == PutOpParam.Op.CREATE && !ns.equals(resolvedNs)) { - // for CREATE, the dest dn should be in the resolved ns - excludes.add(dn); + if (excludeDatanodes != null) { + Collection collection = + getTrimmedStringCollection(excludeDatanodes); + for (DatanodeInfo dn : dns) { + if (collection.contains(dn.getName())) { + excludes.add(dn); + } } } @@ -517,22 +502,6 @@ private DatanodeInfo chooseDatanode(final Router router, return getRandomDatanode(dns, excludes); } - /** - * Get the nameservice info from datanode network location. 
- * @param location network location with format `/ns0/rack1` - * @return nameservice this datanode is in - */ - @VisibleForTesting - public static String getNsFromDataNodeNetworkLocation(String location) { - // network location should be in the format of /ns/rack - Pattern pattern = Pattern.compile("^/([^/]*)/"); - Matcher matcher = pattern.matcher(location); - if (matcher.find()) { - return matcher.group(1); - } - return ""; - } - /** * Get a random Datanode from a subcluster. * @param dns Nodes to be chosen from. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java index 6b90faecc78f3..1d308073290d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java @@ -64,8 +64,6 @@ public static void createCluster(Configuration conf) throws IOException { conf.addResource(CONTRACT_WEBHDFS_XML); cluster = new MiniRouterDFSCluster(true, 2, conf); - cluster.setIndependentDNs(); - cluster.setNumDatanodesPerNameservice(3); // Start NNs and DNs and wait until ready cluster.startCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 8a7a03e018b95..896d08f2c49b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -774,15 +774,6 @@ public void startCluster(Configuration overrideConf) { } topology.setFederation(true); - // Generate conf for namenodes and datanodes - String ns0 = nameservices.get(0); - Configuration nnConf = generateNamenodeConfiguration(ns0); - if (overrideConf != null) { - nnConf.addResource(overrideConf); - // Router also uses this configurations as initial values. - routerConf = new Configuration(overrideConf); - } - // Set independent DNs across subclusters int numDNs = nameservices.size() * numDatanodesPerNameservice; Configuration[] dnConfs = null; @@ -790,7 +781,7 @@ public void startCluster(Configuration overrideConf) { dnConfs = new Configuration[numDNs]; int dnId = 0; for (String nsId : nameservices) { - Configuration subclusterConf = new Configuration(nnConf); + Configuration subclusterConf = new Configuration(); subclusterConf.set(DFS_INTERNAL_NAMESERVICES_KEY, nsId); for (int i = 0; i < numDatanodesPerNameservice; i++) { dnConfs[dnId] = subclusterConf; @@ -800,6 +791,14 @@ public void startCluster(Configuration overrideConf) { } // Start mini DFS cluster + String ns0 = nameservices.get(0); + Configuration nnConf = generateNamenodeConfiguration(ns0); + if (overrideConf != null) { + nnConf.addResource(overrideConf); + // Router also uses this configurations as initial values. 
+ routerConf = new Configuration(overrideConf); + } + cluster = new MiniDFSCluster.Builder(nnConf) .numDataNodes(numDNs) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java deleted file mode 100644 index 7028928041452..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.server.federation.router; - -import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createMountTableEntry; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -import java.io.FileNotFoundException; -import java.net.HttpURLConnection; -import java.net.URL; -import java.util.Collections; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; -import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; -import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; -import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test suite for Router Web Hdfs methods. 
- */ -public class TestRouterWebHdfsMethods { - static final Logger LOG = - LoggerFactory.getLogger(TestRouterWebHdfsMethods.class); - - private static StateStoreDFSCluster cluster; - private static RouterContext router; - private static String httpUri; - - @BeforeClass - public static void globalSetUp() throws Exception { - cluster = new StateStoreDFSCluster(false, 2); - Configuration conf = new RouterConfigBuilder() - .stateStore() - .rpc() - .http() - .admin() - .build(); - cluster.addRouterOverrides(conf); - cluster.startCluster(); - cluster.startRouters(); - cluster.waitClusterUp(); - router = cluster.getRandomRouter(); - httpUri = "http://"+router.getHttpAddress(); - } - - @AfterClass - public static void tearDown() { - if (cluster != null) { - cluster.shutdown(); - cluster = null; - } - } - - @Test - public void testWebHdfsCreate() throws Exception { - // the file is created at default ns (ns0) - String path = "/tmp/file"; - URL url = new URL(getUri(path)); - LOG.info("URL: {}", url); - HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - conn.setRequestMethod("PUT"); - assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); - verifyFile("ns0", path, true); - verifyFile("ns1", path, false); - conn.disconnect(); - } - - @Test - public void testWebHdfsCreateWithMounts() throws Exception { - // the file is created at mounted ns (ns1) - String mountPoint = "/tmp-ns1"; - String path = "/tmp-ns1/file"; - createMountTableEntry( - router.getRouter(), mountPoint, - DestinationOrder.RANDOM, Collections.singletonList("ns1")); - URL url = new URL(getUri(path)); - LOG.info("URL: {}", url); - HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - conn.setRequestMethod("PUT"); - assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); - verifyFile("ns1", path, true); - verifyFile("ns0", path, false); - conn.disconnect(); - } - - private String getUri(String path) { - final String user = System.getProperty("user.name"); - final StringBuilder uri = new StringBuilder(httpUri); - uri.append("/webhdfs/v1"). - append(path). - append("?op=CREATE"). - append("&user.name=" + user); - return uri.toString(); - } - - private void verifyFile(String ns, String path, boolean shouldExist) - throws Exception { - FileSystem fs = cluster.getNamenode(ns, null).getFileSystem(); - try { - fs.getFileStatus(new Path(path)); - if (!shouldExist) { - fail(path + " should not exist in ns " + ns); - } - } catch (FileNotFoundException e) { - if (shouldExist) { - fail(path + " should exist in ns " + ns); - } - } - } - - @Test - public void testGetNsFromDataNodeNetworkLocation() { - assertEquals("ns0", RouterWebHdfsMethods - .getNsFromDataNodeNetworkLocation("/ns0/rack-info1")); - assertEquals("ns0", RouterWebHdfsMethods - .getNsFromDataNodeNetworkLocation("/ns0/row1/rack-info1")); - assertEquals("", RouterWebHdfsMethods - .getNsFromDataNodeNetworkLocation("/row0")); - assertEquals("", RouterWebHdfsMethods - .getNsFromDataNodeNetworkLocation("whatever-rack-info1")); - } -} From 16b69f6e1c36b2ecefad229f89fb77fd6c0614c9 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 13 Apr 2021 17:08:49 +0900 Subject: [PATCH 228/666] HADOOP-17630. [JDK 15] TestPrintableString fails due to Unicode 13.0 support. 
(#2890) Reviewed-by: Wei-Chiu Chuang --- .../java/org/apache/hadoop/fs/shell/TestPrintableString.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java index 8e09fc29744fe..91bfdd6d3948c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java @@ -76,8 +76,8 @@ public void testNonPrintableCharacters() throws Exception { "x\uDB80\uDC00y\uDBFF\uDFFDz\u1050", "x?y?z\u1050"); // Unassigned Unicode - expect("Should replace unassigned U+30000 and U+DFFFF", - "-\uD880\uDC00-\uDB3F\uDFFF-", "-?-?-"); + expect("Should replace unassigned U+DFFFF", + "-\uDB3F\uDFFF-", "-?-"); // Standalone surrogate character (not in a pair) expect("Should replace standalone surrogate U+DB80", "x\uDB80yz", "x?yz"); From 131db00e6cfe8e9033b9a4b511819e9b3a7c7ddb Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 13 Apr 2021 21:24:45 +0530 Subject: [PATCH 229/666] HDFS-15971. Make mkstemp cross platform (#2898) --- .../src/CMakeLists.txt | 3 +- .../libhdfs-tests/test_libhdfs_mini_stress.c | 4 +- .../libhdfspp/lib/x-platform/c-api/syscall.cc | 22 ++++++- .../libhdfspp/lib/x-platform/c-api/syscall.h | 3 + .../native/libhdfspp/lib/x-platform/syscall.h | 29 +++++++++ .../libhdfspp/lib/x-platform/syscall_linux.cc | 11 ++++ .../lib/x-platform/syscall_windows.cc | 30 +++++++++ .../native/libhdfspp/tests/CMakeLists.txt | 13 ++-- .../libhdfspp/tests/configuration_test.cc | 23 ++++--- .../libhdfspp/tests/configuration_test.h | 64 +++++++++++++++---- .../libhdfspp/tests/hdfs_builder_test.cc | 4 +- .../tests/hdfs_configuration_test.cc | 16 ++--- .../tests/x-platform/syscall_common_test.cc | 18 ++++++ 13 files changed, 199 insertions(+), 41 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt index baf5f8cc30e79..4e943de1773f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt @@ -89,7 +89,8 @@ function(build_libhdfs_test NAME LIBRARY) list(APPEND FILES ${CMAKE_SOURCE_DIR}/main/native/libhdfs-tests/${FIL}) endif() endforeach() - add_executable("${NAME}_${LIBRARY}" ${FILES}) + add_executable("${NAME}_${LIBRARY}" $ $ ${FILES}) + target_include_directories("${NAME}_${LIBRARY}" PRIVATE main/native/libhdfspp/lib) endfunction() function(add_libhdfs_test NAME LIBRARY) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c index 846852bfd0e88..1641470733f2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c @@ -22,6 +22,7 @@ #include "hdfspp/hdfs_ext.h" #include "native_mini_dfs.h" #include "os/thread.h" +#include "x-platform/c-api/syscall.h" #include #include @@ -126,7 +127,8 @@ static int hdfsCurlData(const char *host, const tPort port, const char *dirNm, EXPECT_NONNULL(pw = getpwuid(uid)); int fd = -1; - 
EXPECT_NONNEGATIVE(fd = mkstemp(tmpFile)); + EXPECT_NONNEGATIVE(fd = x_platform_syscall_create_and_open_temp_file( + tmpFile, sizeof tmpFile)); tSize sz = 0; while (sz < fileSz) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc index cca9f6a024807..0bb5fc15ef631 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc @@ -18,6 +18,26 @@ #include "x-platform/syscall.h" -extern "C" int x_platform_syscall_write_to_stdout(const char* msg) { +#include +#include + +extern "C" { +int x_platform_syscall_write_to_stdout(const char* msg) { return XPlatform::Syscall::WriteToStdout(msg) ? 1 : 0; } + +int x_platform_syscall_create_and_open_temp_file(char* pattern, + const size_t pattern_len) { + std::vector pattern_vec(pattern, pattern + pattern_len); + + const auto fd = XPlatform::Syscall::CreateAndOpenTempFile(pattern_vec); + if (fd != -1) { + std::copy_n(pattern_vec.begin(), pattern_len, pattern); + } + return fd; +} + +int x_platform_syscall_close_file(const int fd) { + return XPlatform::Syscall::CloseFile(fd); +} +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h index be905ae1d364e..93878b144db98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h @@ -24,5 +24,8 @@ */ int x_platform_syscall_write_to_stdout(const char* msg); +int x_platform_syscall_create_and_open_temp_file(char* pattern, + size_t pattern_len); +int x_platform_syscall_close_file(int fd); #endif // NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_SYSCALL_H diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h index 3a77f2dd6d026..9959f215b2068 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h @@ -20,6 +20,7 @@ #define NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_SYSCALL #include +#include /** * The {@link XPlatform} namespace contains components that @@ -84,6 +85,34 @@ class Syscall { static bool StringCompareIgnoreCase(const std::string& a, const std::string& b); + /** + * Creates and opens a temporary file with a given {@link pattern}. + * The {@link pattern} must end with a minimum of 6 'X' characters. + * This function will first modify the last 6 'X' characters with + * random character values, which serve as the temporary file name. + * Subsequently opens the file and returns the file descriptor for + * the same. The behaviour of this function is the same as that of + * POSIX mkstemp function. The file must be later closed by the + * application and is not handled by this function. + * + * @param pattern the pattern to be used for the temporary filename. + * @returns an integer representing the file descriptor for the + * opened temporary file. 
Returns -1 in the case of error and sets + * the global errno with the appropriate error code. + */ + static int CreateAndOpenTempFile(std::vector& pattern); + + /** + * Closes the file corresponding to given {@link file_descriptor}. + * + * @param file_descriptor the file descriptor of the file to close. + * @returns a boolean indicating the status of the call to this + * function. true if it's a success, false in the case of an error. + * The global errno is set if the call to this function was not + * successful. + */ + static bool CloseFile(int file_descriptor); + private: static bool WriteToStdoutImpl(const char* message); }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc index 06e96a8732e37..9903bbe853ba3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc @@ -21,6 +21,7 @@ #include #include +#include #include "syscall.h" @@ -54,3 +55,13 @@ bool XPlatform::Syscall::StringCompareIgnoreCase(const std::string& a, const std::string& b) { return strcasecmp(a.c_str(), b.c_str()) == 0; } + +int XPlatform::Syscall::CreateAndOpenTempFile(std::vector& pattern) { + // Make space for mkstemp to add NULL character at the end + pattern.resize(pattern.size() + 1); + return mkstemp(pattern.data()); +} + +bool XPlatform::Syscall::CloseFile(const int file_descriptor) { + return close(file_descriptor) == 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc index 2cd9e9d5157df..b5ddd04373841 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc @@ -19,7 +19,14 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include #include #include "syscall.h" @@ -64,3 +71,26 @@ bool XPlatform::Syscall::StringCompareIgnoreCase(const std::string& a, const std::string& b) { return _stricmp(a.c_str(), b.c_str()) == 0; } + +int XPlatform::Syscall::CreateAndOpenTempFile(std::vector& pattern) { + if (_set_errno(0) != 0) { + return -1; + } + + // Make space for _mktemp_s to add NULL character at the end + pattern.resize(pattern.size() + 1); + if (_mktemp_s(pattern.data(), pattern.size()) != 0) { + return -1; + } + + auto fd{-1}; + if (_sopen_s(&fd, pattern.data(), _O_RDWR | _O_CREAT | _O_EXCL, _SH_DENYNO, + _S_IREAD | _S_IWRITE) != 0) { + return -1; + } + return fd; +} + +bool XPlatform::Syscall::CloseFile(const int file_descriptor) { + return _close(file_descriptor) == 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt index 32f75f474b8d7..d37fcc29f9cbd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt @@ -96,11 +96,13 @@ add_executable(node_exclusion_test node_exclusion_test.cc) 
target_link_libraries(node_exclusion_test fs gmock_main common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(node_exclusion node_exclusion_test) -add_executable(configuration_test configuration_test.cc) +add_executable(configuration_test $ configuration_test.cc) +target_include_directories(configuration_test PRIVATE ../lib) target_link_libraries(configuration_test common gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(configuration configuration_test) -add_executable(hdfs_configuration_test hdfs_configuration_test.cc) +add_executable(hdfs_configuration_test $ hdfs_configuration_test.cc) +target_include_directories(hdfs_configuration_test PRIVATE ../lib) target_link_libraries(hdfs_configuration_test common gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_configuration hdfs_configuration_test) @@ -108,11 +110,13 @@ add_executable(hdfspp_errors_test hdfspp_errors.cc) target_link_libraries(hdfspp_errors_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfspp_errors hdfspp_errors_test) -add_executable(hdfs_builder_test hdfs_builder_test.cc) +add_executable(hdfs_builder_test $ hdfs_builder_test.cc) +target_include_directories(hdfs_builder_test PRIVATE ../lib) target_link_libraries(hdfs_builder_test test_common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_builder_test hdfs_builder_test) add_executable(logging_test logging_test.cc $) +target_include_directories(logging_test PRIVATE ../lib) target_link_libraries(logging_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(logging_test logging_test) @@ -124,7 +128,8 @@ add_executable(user_lock_test user_lock_test.cc) target_link_libraries(user_lock_test fs gmock_main common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(user_lock user_lock_test) -add_executable(hdfs_config_connect_bugs_test hdfs_config_connect_bugs.cc) +add_executable(hdfs_config_connect_bugs_test $ hdfs_config_connect_bugs.cc) +target_include_directories(hdfs_config_connect_bugs_test PRIVATE ../lib) target_link_libraries(hdfs_config_connect_bugs_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_config_connect_bugs hdfs_config_connect_bugs_test) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc index 9534204c92ca0..3bf2524354b6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc @@ -299,11 +299,12 @@ TEST(ConfigurationTest, TestFileReads) // Single stream { TempFile tempFile; - writeSimpleConfig(tempFile.filename, "key1", "value1"); + writeSimpleConfig(tempFile.GetFileName(), "key1", "value1"); ConfigurationLoader config_loader; config_loader.ClearSearchPath(); - optional config = 
config_loader.LoadFromFile(tempFile.filename); + optional config = + config_loader.LoadFromFile(tempFile.GetFileName()); EXPECT_TRUE(config && "Parse first stream"); EXPECT_EQ("value1", config->GetWithDefault("key1", "")); } @@ -311,16 +312,18 @@ TEST(ConfigurationTest, TestFileReads) // Multiple files { TempFile tempFile; - writeSimpleConfig(tempFile.filename, "key1", "value1"); + writeSimpleConfig(tempFile.GetFileName(), "key1", "value1"); ConfigurationLoader loader; - optional config = loader.LoadFromFile(tempFile.filename); + optional config = + loader.LoadFromFile(tempFile.GetFileName()); ASSERT_TRUE(config && "Parse first stream"); EXPECT_EQ("value1", config->GetWithDefault("key1", "")); TempFile tempFile2; - writeSimpleConfig(tempFile2.filename, "key2", "value2"); - optional config2 = loader.OverlayResourceFile(*config, tempFile2.filename); + writeSimpleConfig(tempFile2.GetFileName(), "key2", "value2"); + optional config2 = + loader.OverlayResourceFile(*config, tempFile2.GetFileName()); ASSERT_TRUE(config2 && "Parse second stream"); EXPECT_EQ("value1", config2->GetWithDefault("key1", "")); EXPECT_EQ("value2", config2->GetWithDefault("key2", "")); @@ -350,13 +353,13 @@ TEST(ConfigurationTest, TestFileReads) { TempDir tempDir1; TempFile tempFile1(tempDir1.path + "/file1.xml"); - writeSimpleConfig(tempFile1.filename, "key1", "value1"); + writeSimpleConfig(tempFile1.GetFileName(), "key1", "value1"); TempDir tempDir2; TempFile tempFile2(tempDir2.path + "/file2.xml"); - writeSimpleConfig(tempFile2.filename, "key2", "value2"); + writeSimpleConfig(tempFile2.GetFileName(), "key2", "value2"); TempDir tempDir3; TempFile tempFile3(tempDir3.path + "/file3.xml"); - writeSimpleConfig(tempFile3.filename, "key3", "value3"); + writeSimpleConfig(tempFile3.GetFileName(), "key3", "value3"); ConfigurationLoader loader; loader.SetSearchPath(tempDir1.path + ":" + tempDir2.path + ":" + tempDir3.path); @@ -377,7 +380,7 @@ TEST(ConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.filename, "key1", "value1"); + writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h index 9ad11b70cc8dc..23fc0d31047b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h @@ -21,11 +21,19 @@ #include "hdfspp/config_parser.h" #include "common/configuration.h" #include "common/configuration_loader.h" +#include "x-platform/syscall.h" + #include #include #include +#include +#include +#include + #include +#include #include +#include namespace hdfs { @@ -107,23 +115,51 @@ void writeDamagedConfig(const std::string& filename, Args... 
args) { // TempDir: is deleted on destruction class TempFile { -public: - std::string filename; - char fn_buffer[128]; - int tempFileHandle; - TempFile() : tempFileHandle(-1) { - strncpy(fn_buffer, "/tmp/test_XXXXXXXXXX", sizeof(fn_buffer)); - tempFileHandle = mkstemp(fn_buffer); - EXPECT_NE(-1, tempFileHandle); - filename = fn_buffer; + public: + TempFile() { + std::vector tmp_buf(filename_.begin(), filename_.end()); + fd_ = XPlatform::Syscall::CreateAndOpenTempFile(tmp_buf); + EXPECT_NE(fd_, -1); + filename_.assign(tmp_buf.data()); } - TempFile(const std::string & fn) : filename(fn), tempFileHandle(-1) { - strncpy(fn_buffer, fn.c_str(), sizeof(fn_buffer)); - fn_buffer[sizeof(fn_buffer)-1] = 0; + + TempFile(std::string fn) : filename_(std::move(fn)) {} + + TempFile(const TempFile& other) = default; + + TempFile(TempFile&& other) noexcept + : filename_{std::move(other.filename_)}, fd_{other.fd_} {} + + TempFile& operator=(const TempFile& other) { + if (&other != this) { + filename_ = other.filename_; + fd_ = other.fd_; + } + return *this; } - ~TempFile() { if(-1 != tempFileHandle) close(tempFileHandle); unlink(fn_buffer); } -}; + TempFile& operator=(TempFile&& other) noexcept { + if (&other != this) { + filename_ = std::move(other.filename_); + fd_ = other.fd_; + } + return *this; + } + + [[nodiscard]] const std::string& GetFileName() const { return filename_; } + + ~TempFile() { + if (-1 != fd_) { + EXPECT_NE(XPlatform::Syscall::CloseFile(fd_), -1); + } + + unlink(filename_.c_str()); + } + + private: + std::string filename_{"/tmp/test_XXXXXXXXXX"}; + int fd_{-1}; +}; // Callback to remove a directory in the nftw visitor int nftw_remove(const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftwbuf) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc index 01db69d41ef22..147cfee6be82c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc @@ -45,7 +45,7 @@ TEST(HdfsBuilderTest, TestRead) { TempDir tempDir1; TempFile tempFile1(tempDir1.path + "/core-site.xml"); - writeSimpleConfig(tempFile1.filename, "key1", "value1"); + writeSimpleConfig(tempFile1.GetFileName(), "key1", "value1"); hdfsBuilder * builder = hdfsNewBuilderFromDirectory(tempDir1.path.c_str()); @@ -68,7 +68,7 @@ TEST(HdfsBuilderTest, TestRead) { TempDir tempDir1; TempFile tempFile1(tempDir1.path + "/core-site.xml"); - writeSimpleConfig(tempFile1.filename, "key1", "100"); + writeSimpleConfig(tempFile1.GetFileName(), "key1", "100"); hdfsBuilder * builder = hdfsNewBuilderFromDirectory(tempDir1.path.c_str()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc index b21725c50fae7..4e1bc3b65aec9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc @@ -72,9 +72,9 @@ TEST(HdfsConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.filename, "key1", "value1"); + 
writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeSimpleConfig(hdfsSite.filename, "key2", "value2"); + writeSimpleConfig(hdfsSite.GetFileName(), "key2", "value2"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); @@ -89,7 +89,7 @@ TEST(HdfsConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.filename, "key1", "value1"); + writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); @@ -103,7 +103,7 @@ TEST(HdfsConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeSimpleConfig(hdfsSite.filename, "key2", "value2"); + writeSimpleConfig(hdfsSite.GetFileName(), "key2", "value2"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); @@ -121,9 +121,9 @@ TEST(HdfsConfigurationTest, TestConfigParserAPI) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.filename, "key1", "value1"); + writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeSimpleConfig(hdfsSite.filename, "key2", "value2"); + writeSimpleConfig(hdfsSite.GetFileName(), "key2", "value2"); ConfigParser parser(tempDir.path); @@ -142,9 +142,9 @@ TEST(HdfsConfigurationTest, TestConfigParserAPI) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.filename, "key1", "value1"); + writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeDamagedConfig(hdfsSite.filename, "key2", "value2"); + writeDamagedConfig(hdfsSite.GetFileName(), "key2", "value2"); ConfigParser parser(tempDir.path); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc index 7fa3971cf7edd..a7847e1db3547 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc @@ -85,3 +85,21 @@ TEST(XPlatformSyscall, StringCompareIgnoreCaseNegative) { EXPECT_FALSE(XPlatform::Syscall::StringCompareIgnoreCase("abcd", "abcde")); EXPECT_FALSE(XPlatform::Syscall::StringCompareIgnoreCase("12345", "abcde")); } + +TEST(XPlatformSyscall, CreateAndOpenTempFileBasic) { + std::string pattern("tmp-XXXXXX"); + std::vector pattern_vec(pattern.begin(), pattern.end()); + + const auto fd = XPlatform::Syscall::CreateAndOpenTempFile(pattern_vec); + EXPECT_GT(fd, -1); + EXPECT_TRUE(XPlatform::Syscall::CloseFile(fd)); +} + +TEST(XPlatformSyscall, CreateAndOpenTempFileNegative) { + std::string pattern("does-not-adhere-to-pattern"); + std::vector pattern_vec(pattern.begin(), pattern.end()); + + const auto fd = XPlatform::Syscall::CreateAndOpenTempFile(pattern_vec); + EXPECT_EQ(fd, -1); + EXPECT_FALSE(XPlatform::Syscall::CloseFile(fd)); +} From baceca06241280612c116327f75fa19dc4d62cfc Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 13 Apr 2021 22:26:49 +0530 Subject: [PATCH 230/666] review comments changes --- .../src/main/java/org/apache/hadoop/tracing/Span.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 
deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java index d6ddf964ba693..acc1c47fc9e73 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Span.java @@ -22,7 +22,7 @@ import java.io.Closeable; public class Span implements Closeable { - io.opentelemetry.api.trace.Span span = null; + private io.opentelemetry.api.trace.Span span = null; public Span() { } @@ -45,7 +45,10 @@ public Span addTimelineAnnotation(String msg) { } public SpanContext getContext() { - return new SpanContext(span.getSpanContext()); + if(span != null){ + return new SpanContext(span.getSpanContext()); + } + return null; } public void finish() { From ecc67c5f59706671779b9537533d8de351cd4a2c Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 13 Apr 2021 23:28:42 +0530 Subject: [PATCH 231/666] Revert "HDFS-15884. RBF: Remove unused method getCreateLocation in RouterRpcServer (#2754). Contributed by tomscut." This reverts commit e565b05c80c731898a54f8c1d358c12090ca5bbf. The removed method needs to be used in HDFS-15423. --- .../server/federation/router/RouterRpcServer.java | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 6bf159f7886a0..1d0800e4bd833 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -776,6 +776,20 @@ public HdfsFileStatus create(String src, FsPermission masked, replication, blockSize, supportedVersions, ecPolicyName, storagePolicy); } + + /** + * Get the location to create a file. It checks if the file already existed + * in one of the locations. + * + * @param src Path of the file to check. + * @return The remote location for this file. + * @throws IOException If the file has no creation location. + */ + RemoteLocation getCreateLocation(final String src) throws IOException { + final List locations = getLocationsForPath(src, true); + return getCreateLocation(src, locations); + } + /** * Get the location to create a file. It checks if the file already existed * in one of the locations. From baebd2d6d1708fd992439c3a7be2c0462a29cf00 Mon Sep 17 00:00:00 2001 From: Miklos Gergely Date: Wed, 14 Apr 2021 17:16:30 +0200 Subject: [PATCH 232/666] YARN-10736. Fix GetApplicationsRequest JavaDoc. Contributed by Miklos Gergely. 
(#2906) --- .../yarn/api/protocolrecords/GetApplicationsRequest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java index 81d98b5604885..5a00fa93f0c6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java @@ -395,7 +395,7 @@ public abstract void setStartRange(long begin, long end) public abstract void setScope(ApplicationsRequestScope scope); /** - * Set the name to filter applications. + * Get the name to filter applications. * * @return the name */ @@ -404,7 +404,7 @@ public abstract void setStartRange(long begin, long end) public abstract String getName(); /** - * Get the name to filter applications. + * Set the name to filter applications. * * @param name of the application */ From a7f926df590ac9cd23d5d830bd2e48dcfc5c1a3d Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 15 Apr 2021 14:34:46 +0530 Subject: [PATCH 233/666] HADOOP-17524. Remove EventCounter and Log counters from JVM Metrics (#2909) Reviewed-by: Duo Zhang Signed-off-by: Akira Ajisaka --- .../org/apache/hadoop/log/EventCounter.java | 34 ------ .../hadoop/log/metrics/EventCounter.java | 100 ------------------ .../hadoop/metrics2/source/JvmMetrics.java | 8 -- .../metrics2/source/TestJvmMetrics.java | 5 - 4 files changed, 147 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/EventCounter.java delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/metrics/EventCounter.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/EventCounter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/EventCounter.java deleted file mode 100644 index 3192d06807326..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/EventCounter.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.log; - -/** - * A log4J Appender that simply counts logging events in three levels: - * fatal, error and warn. 
The class name is used in log4j.properties - * @deprecated use {@link org.apache.hadoop.log.metrics.EventCounter} instead - */ -@Deprecated -public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter { - static { - // The logging system is not started yet. - System.err.println("WARNING: "+ EventCounter.class.getName() + - " is deprecated. Please use "+ - org.apache.hadoop.log.metrics.EventCounter.class.getName() + - " in all the log4j.properties files."); - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/metrics/EventCounter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/metrics/EventCounter.java deleted file mode 100644 index 95b9cfc5409b0..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/metrics/EventCounter.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.log.metrics; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.spi.LoggingEvent; - -/** - * A log4J Appender that simply counts logging events in three levels: - * fatal, error and warn. 
The class name is used in log4j.properties - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class EventCounter extends AppenderSkeleton { - private static final int FATAL = 0; - private static final int ERROR = 1; - private static final int WARN = 2; - private static final int INFO = 3; - - private static class EventCounts { - private final long[] counts = {0, 0, 0, 0}; - - private synchronized void incr(int i) { - ++counts[i]; - } - - private synchronized long get(int i) { - return counts[i]; - } - } - - private static EventCounts counts = new EventCounts(); - - @InterfaceAudience.Private - public static long getFatal() { - return counts.get(FATAL); - } - - @InterfaceAudience.Private - public static long getError() { - return counts.get(ERROR); - } - - @InterfaceAudience.Private - public static long getWarn() { - return counts.get(WARN); - } - - @InterfaceAudience.Private - public static long getInfo() { - return counts.get(INFO); - } - - @Override - public void append(LoggingEvent event) { - Level level = event.getLevel(); - // depends on the api, == might not work - // see HADOOP-7055 for details - if (level.equals(Level.INFO)) { - counts.incr(INFO); - } - else if (level.equals(Level.WARN)) { - counts.incr(WARN); - } - else if (level.equals(Level.ERROR)) { - counts.incr(ERROR); - } - else if (level.equals(Level.FATAL)) { - counts.incr(FATAL); - } - } - - @Override - public void close() { - } - - @Override - public boolean requiresLayout() { - return false; - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java index 816940b109879..9e5a78fd90571 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java @@ -33,7 +33,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.log.metrics.EventCounter; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -154,7 +153,6 @@ public void getMetrics(MetricsCollector collector, boolean all) { } else { getThreadUsageFromGroup(rb); } - getEventCounters(rb); } private void getMemoryUsage(MetricsRecordBuilder rb) { @@ -284,10 +282,4 @@ private void getThreadUsageFromGroup(MetricsRecordBuilder rb) { .addGauge(ThreadsTerminated, threadsTerminated); } - private void getEventCounters(MetricsRecordBuilder rb) { - rb.addCounter(LogFatal, EventCounter.getFatal()) - .addCounter(LogError, EventCounter.getError()) - .addCounter(LogWarn, EventCounter.getWarn()) - .addCounter(LogInfo, EventCounter.getInfo()); - } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java index 6fdd64dca7c30..ea86fc14c7978 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java @@ -78,13 +78,8 @@ public void testJvmPauseMonitorPresence() { for (JvmMetricsInfo info : 
JvmMetricsInfo.values()) { if (info.name().startsWith("Mem")) { verify(rb).addGauge(eq(info), anyFloat()); - } else if (info.name().startsWith("Gc") && - !info.name().equals("GcTimePercentage")) { - verify(rb).addCounter(eq(info), anyLong()); } else if (info.name().startsWith("Threads")) { verify(rb).addGauge(eq(info), anyInt()); - } else if (info.name().startsWith("Log")) { - verify(rb).addCounter(eq(info), anyLong()); } } } From b37d613c7487f4cc7d4396dc5a1aafc943b23443 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Thu, 15 Apr 2021 21:33:18 +0900 Subject: [PATCH 234/666] HADOOP-17569. Building native code fails on Fedora 33. (#2886) --- .../hadoop-common/src/main/native/src/exception.c | 4 ++-- .../hadoop-hdfs-native-client/src/CMakeLists.txt | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/exception.c b/hadoop-common-project/hadoop-common/src/main/native/src/exception.c index fc072e8002bf2..a25cc3d3b7eef 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/exception.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/exception.c @@ -111,8 +111,8 @@ jthrowable newIOException(JNIEnv* env, const char *fmt, ...) const char* terror(int errnum) { -#if defined(__sun) -// MT-Safe under Solaris which doesn't support sys_errlist/sys_nerr +#if defined(__sun) || defined(__GLIBC_PREREQ) && __GLIBC_PREREQ(2, 32) +// MT-Safe under Solaris or glibc >= 2.32 not supporting sys_errlist/sys_nerr return strerror(errnum); #else if ((errnum < 0) || (errnum >= sys_nerr)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt index 4e943de1773f2..d8c2012fb88de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt @@ -69,6 +69,8 @@ if(WIN32) set(OUT_DIR bin) else() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden") + # using old default behavior on GCC >= 10.0 + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fcommon") set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/posix) # IMPORTANT: OUT_DIR MUST be relative to maven's From d3c68182e24dfc914c363e5e535638189a7a4b9f Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Thu, 15 Apr 2021 14:45:51 -0700 Subject: [PATCH 235/666] HDFS-15850. 
Superuser actions should be reported to external enforcers (#2784) --- .../ContentSummaryComputationContext.java | 9 ++- .../hdfs/server/namenode/FSDirAttrOp.java | 35 ++++++--- .../namenode/FSDirStatAndListingOp.java | 7 +- .../hdfs/server/namenode/FSDirXAttrOp.java | 5 +- .../hdfs/server/namenode/FSDirectory.java | 30 +++++--- .../hdfs/server/namenode/FSNamesystem.java | 74 ++++++++++-------- .../server/namenode/FSPermissionChecker.java | 75 +++++++++++++++++-- .../namenode/INodeAttributeProvider.java | 45 ++++++++++- .../hadoop/hdfs/server/namenode/NameNode.java | 24 +++--- .../server/namenode/NameNodeRpcServer.java | 51 ++++++++----- .../hdfs/server/namenode/NamenodeFsck.java | 3 +- .../namenode/XAttrPermissionFilter.java | 16 +++- 12 files changed, 270 insertions(+), 104 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java index 7a5963a6c57cd..e304baf652843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java @@ -205,8 +205,13 @@ public String getErasureCodingPolicyName(INode inode) { void checkPermission(INodeDirectory inode, int snapshotId, FsAction access) throws AccessControlException { if (dir != null && dir.isPermissionEnabled() - && pc != null && !pc.isSuperUser()) { - pc.checkPermission(inode, snapshotId, access); + && pc != null) { + if (pc.isSuperUser()) { + // call external enforcer for audit + pc.checkSuperuserPrivilege(inode.getFullPathName()); + } else { + pc.checkPermission(inode, snapshotId, access); + } } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 173348f356473..5914d7449619c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -83,14 +83,25 @@ static FileStatus setOwner( try { iip = fsd.resolvePath(pc, src, DirOp.WRITE); fsd.checkOwner(pc, iip); - if (!pc.isSuperUser()) { - if (username != null && !pc.getUser().equals(username)) { - throw new AccessControlException("User " + pc.getUser() - + " is not a super user (non-super user cannot change owner)."); - } - if (group != null && !pc.isMemberOfGroup(group)) { - throw new AccessControlException( - "User " + pc.getUser() + " does not belong to " + group); + // At this point, the user must be either owner or super user. + // superuser: can change owner to a different user, + // change owner group to any group + // owner: can't change owner to a different user but can change owner + // group to different group that the user belongs to. 
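A side note on the ownership rule spelled out in the comment above (the hunk that implements it continues immediately below): once the earlier fsd.checkOwner(pc, iip) check has passed, only a request that changes the owner, or moves the group outside the caller's own groups, has to escalate to the superuser check. A minimal standalone sketch of that rule, with hypothetical names rather than patch code:

import java.util.Set;

final class ChownRuleSketch {
  // Mirrors the decision in the setOwner hunk below; assumes the caller has
  // already passed the owner-or-superuser check on the target file
  // (fsd.checkOwner above), so only the escalation condition is modeled here.
  static boolean needsSuperuser(String caller, Set<String> callerGroups,
                                String newOwner, String newGroup) {
    boolean changesOwner = newOwner != null && !newOwner.equals(caller);
    boolean movesToForeignGroup = newGroup != null && !callerGroups.contains(newGroup);
    return changesOwner || movesToForeignGroup;
  }
}

For example, needsSuperuser("alice", Set.of("staff"), null, "staff") is false (the owner keeps the file in one of her groups), while needsSuperuser("alice", Set.of("staff"), "bob", null) is true, so the request is rejected unless alice is a superuser.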
+ if ((username != null && !pc.getUser().equals(username)) || + (group != null && !pc.isMemberOfGroup(group))) { + try { + // check if the user is superuser + pc.checkSuperuserPrivilege(iip.getPath()); + } catch (AccessControlException e) { + if (username != null && !pc.getUser().equals(username)) { + throw new AccessControlException("User " + pc.getUser() + + " is not a super user (non-super user cannot change owner)."); + } + if (group != null && !pc.isMemberOfGroup(group)) { + throw new AccessControlException( + "User " + pc.getUser() + " does not belong to " + group); + } } } changed = unprotectedSetOwner(fsd, iip, username, group); @@ -238,10 +249,12 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src, fsd.writeLock(); try { INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE); + // Here, the assumption is that the caller of this method has + // already checked for super user privilege if (fsd.isPermissionEnabled() && !pc.isSuperUser() && allowOwner) { - INodeDirectory parentDir= iip.getLastINode().getParent(); - if (parentDir == null || - !parentDir.getUserName().equals(pc.getUser())) { + try { + fsd.checkOwner(pc, iip.getParentINodesInPath()); + } catch(AccessControlException ace) { throw new AccessControlException( "Access denied for user " + pc.getUser() + ". Superuser or owner of parent folder privilege is required"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index dfacc491eae53..8aff179358896 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -105,6 +105,7 @@ static HdfsFileStatus getFileInfo(FSDirectory fsd, FSPermissionChecker pc, // superuser to receive null instead. 
try { iip = fsd.resolvePath(pc, srcArg, dirOp); + pc.checkSuperuserPrivilege(iip.getPath()); } catch (AccessControlException ace) { return null; } @@ -151,12 +152,14 @@ static GetBlockLocationsResult getBlockLocations( BlockManager bm = fsd.getBlockManager(); fsd.readLock(); try { - final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ); + // Just get INodesInPath without access checks, since we check for path + // access later + final INodesInPath iip = fsd.resolvePath(null, src, DirOp.READ); src = iip.getPath(); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); if (fsd.isPermissionEnabled()) { - fsd.checkPathAccess(pc, iip, FsAction.READ); fsd.checkUnreadableBySuperuser(pc, iip); + fsd.checkPathAccess(pc, iip, FsAction.READ); } final long fileSize = iip.isSnapshot() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java index ef83fe982b24c..ce79321f96801 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -437,7 +437,10 @@ private static void checkXAttrChangeAccess( if (inode != null && inode.isDirectory() && inode.getFsPermission().getStickyBit()) { - if (!pc.isSuperUser()) { + if (pc.isSuperUser()) { + // call external enforcer for audit + pc.checkSuperuserPrivilege(iip.getPath()); + } else { fsd.checkOwner(pc, iip); } } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 0e15921ba4953..497aa84e767a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -717,18 +717,18 @@ public INodesInPath resolvePath(FSPermissionChecker pc, String src, byte[][] components = INode.getPathComponents(src); boolean isRaw = isReservedRawName(components); + components = resolveComponents(components, this); + INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw); if (isPermissionEnabled && pc != null && isRaw) { switch(dirOp) { - case READ_LINK: - case READ: - break; - default: - pc.checkSuperuserPrivilege(); - break; + case READ_LINK: + case READ: + break; + default: + pc.checkSuperuserPrivilege(iip.getPath()); + break; } } - components = resolveComponents(components, this); - INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw); // verify all ancestors are dirs and traversable. 
note that only // methods that create new namespace items have the signature to throw // PNDE @@ -1942,7 +1942,10 @@ void checkPermission(FSPermissionChecker pc, INodesInPath iip, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { - if (!pc.isSuperUser()) { + if (pc.isSuperUser()) { + // call the external enforcer for audit + pc.checkSuperuserPrivilege(iip.getPath()); + } else { readLock(); try { pc.checkPermission(iip, doCheckOwner, ancestorAccess, @@ -1958,9 +1961,12 @@ void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip) if (pc.isSuperUser()) { if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip, SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) { - throw new AccessControlException( - "Access is denied for " + pc.getUser() + " since the superuser " - + "is not allowed to perform this operation."); + String errorMessage = "Access is denied for " + pc.getUser() + + " since the superuser is not allowed to perform this operation."; + pc.denyUserAccess(iip.getPath(), errorMessage); + } else { + // call the external enforcer for audit. + pc.checkSuperuserPrivilege(iip.getPath()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7d9f78c0647fb..e559515696d66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1954,7 +1954,7 @@ BatchedListEntries listOpenFiles(long prevId, EnumSet openFilesTypes, String path) throws IOException { INode.checkAbsolutePath(path); final String operationName = "listOpenFiles"; - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, path); checkOperation(OperationCategory.READ); BatchedListEntries batchedListEntries; String normalizedPath = new Path(path).toString(); // normalize path. @@ -2289,6 +2289,7 @@ boolean truncate(String src, long newLength, String clientName, final String operationName = "truncate"; requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE); FSDirTruncateOp.TruncateResult r = null; + FileStatus status; try { NameNode.stateChangeLog.info( "DIR* NameSystem.truncate: src={} newLength={}", src, newLength); @@ -2307,7 +2308,7 @@ boolean truncate(String src, long newLength, String clientName, r = FSDirTruncateOp.truncate(this, src, newLength, clientName, clientMachine, mtime, toRemoveBlocks, pc); } finally { - FileStatus status = r != null ? r.getFileStatus() : null; + status = r != null ? 
r.getFileStatus() : null; writeUnlock(operationName, getLockReportInfoSupplier(src, null, status)); } @@ -2316,11 +2317,12 @@ boolean truncate(String src, long newLength, String clientName, removeBlocks(toRemoveBlocks); toRemoveBlocks.clear(); } - logAuditEvent(true, operationName, src, null, r.getFileStatus()); + logAuditEvent(true, operationName, src, null, status); } catch (AccessControlException e) { logAuditEvent(false, operationName, src); throw e; } + assert(r != null); return r.getResult(); } @@ -3582,7 +3584,7 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) FSPermissionChecker.setOperationType(operationName); try { if(!allowOwnerSetQuota) { - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, src); } writeLock(); try { @@ -5222,18 +5224,22 @@ PermissionStatus createFsOwnerPermissions(FsPermission permission) { return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission); } + /** + * This method is retained for backward compatibility. + * Please use {@link #checkSuperuserPrivilege(String)} instead. + * + * @throws AccessControlException if user is not a super user. + */ void checkSuperuserPrivilege() throws AccessControlException { if (isPermissionEnabled) { FSPermissionChecker pc = getPermissionChecker(); - pc.checkSuperuserPrivilege(); + pc.checkSuperuserPrivilege(null); } } - void checkSuperuserPrivilege(FSPermissionChecker pc) - throws AccessControlException { - if (isPermissionEnabled) { - pc.checkSuperuserPrivilege(); - } + void checkSuperuserPrivilege(String operationName) + throws IOException { + checkSuperuserPrivilege(operationName, null); } /** @@ -6011,7 +6017,8 @@ public String toString() { */ Collection listCorruptFileBlocks(String path, String[] cookieTab) throws IOException { - checkSuperuserPrivilege(); + final String operationName = "listCorruptFileBlocks"; + checkSuperuserPrivilege(operationName, path); checkOperation(OperationCategory.READ); int count = 0; @@ -6939,7 +6946,7 @@ public SnapshotManager getSnapshotManager() { void allowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); final String operationName = "allowSnapshot"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, path); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6956,7 +6963,7 @@ void allowSnapshot(String path) throws IOException { void disallowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); final String operationName = "disallowSnapshot"; - checkSuperuserPrivilege(operationName); + checkSuperuserPrivilege(operationName, path); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7666,13 +7673,14 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache) final String operationName = "addCachePool"; checkOperation(OperationCategory.WRITE); String poolInfoStr = null; + String poolName = req == null ? null : req.getPoolName(); try { - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, poolName); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot add cache pool" - + (req == null ? null : req.getPoolName())); + + poolName); CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req, logRetryCache); poolInfoStr = info.toString(); @@ -7694,7 +7702,7 @@ void modifyCachePool(CachePoolInfo req, boolean logRetryCache) String poolNameStr = "{poolName: " + (req == null ? 
null : req.getPoolName()) + "}"; try { - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, poolNameStr); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7721,7 +7729,7 @@ void removeCachePool(String cachePoolName, boolean logRetryCache) checkOperation(OperationCategory.WRITE); String poolNameStr = "{poolName: " + cachePoolName + "}"; try { - checkSuperuserPrivilege(); + checkSuperuserPrivilege(operationName, poolNameStr); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -7926,8 +7934,7 @@ void createEncryptionZone(final String src, final String keyName, Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir, keyName, src); final FSPermissionChecker pc = getPermissionChecker(); - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, src); checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -7988,9 +7995,7 @@ BatchedListEntries listEncryptionZones(long prevId) final String operationName = "listEncryptionZones"; boolean success = false; checkOperation(OperationCategory.READ); - final FSPermissionChecker pc = getPermissionChecker(); - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, dir.rootDir.getFullPathName()); readLock(); try { checkOperation(OperationCategory.READ); @@ -8006,12 +8011,13 @@ BatchedListEntries listEncryptionZones(long prevId) void reencryptEncryptionZone(final String zone, final ReencryptAction action, final boolean logRetryCache) throws IOException { + final String operationName = "reencryptEncryptionZone"; boolean success = false; try { Preconditions.checkNotNull(zone, "zone is null."); checkOperation(OperationCategory.WRITE); final FSPermissionChecker pc = dir.getPermissionChecker(); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, zone); checkNameNodeSafeMode("NameNode in safemode, cannot " + action + " re-encryption on zone " + zone); reencryptEncryptionZoneInt(pc, zone, action, logRetryCache); @@ -8026,9 +8032,7 @@ BatchedListEntries listReencryptionStatus( final String operationName = "listReencryptionStatus"; boolean success = false; checkOperation(OperationCategory.READ); - final FSPermissionChecker pc = getPermissionChecker(); - FSPermissionChecker.setOperationType(operationName); - checkSuperuserPrivilege(pc); + checkSuperuserPrivilege(operationName, dir.rootDir.getFullPathName()); readLock(); try { checkOperation(OperationCategory.READ); @@ -8871,15 +8875,19 @@ private ECTopologyVerifierResult getEcTopologyVerifierResultForEnabledPolicies() Arrays.asList(enabledEcPolicies)); } - // This method logs operatoinName without super user privilege. + // This method logs operationName without super user privilege. // It should be called without holding FSN lock. 
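Before the replacement helper that follows, a condensed view of the call-site pattern the hunks above converge on, written as a hypothetical FSNamesystem operation; "frobnicateCachePool" and its body are made up for illustration, and since it would sit inside FSNamesystem it is a sketch rather than a standalone program. The point of the change is that both the operation name and the affected resource now flow into the superuser check:

void frobnicateCachePool(String poolName) throws IOException {
  final String operationName = "frobnicateCachePool";  // hypothetical operation
  checkOperation(OperationCategory.WRITE);
  // The patch's pattern: a denied check is audited as (operationName, poolName)
  // by the helper defined just below before the exception is rethrown.
  checkSuperuserPrivilege(operationName, poolName);
  // ... take the namesystem write lock and perform the privileged mutation ...
  logAuditEvent(true, operationName, poolName);
}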
- void checkSuperuserPrivilege(String operationName) + void checkSuperuserPrivilege(String operationName, String path) throws IOException { - try { - checkSuperuserPrivilege(); - } catch (AccessControlException ace) { - logAuditEvent(false, operationName, null); - throw ace; + if (isPermissionEnabled) { + try { + FSPermissionChecker.setOperationType(operationName); + FSPermissionChecker pc = getPermissionChecker(); + pc.checkSuperuserPrivilege(path); + } catch(AccessControlException ace){ + logAuditEvent(false, operationName, path); + throw ace; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 3f80952a8e9af..e8e292761d40c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -144,18 +145,74 @@ private AccessControlEnforcer getAccessControlEnforcer() { ? attributeProvider.getExternalAccessControlEnforcer(this) : this; } + private AuthorizationContext getAuthorizationContextForSuperUser( + String path) { + String opType = operationType.get(); + + AuthorizationContext.Builder builder = + new INodeAttributeProvider.AuthorizationContext.Builder(); + builder.fsOwner(fsOwner). + supergroup(supergroup). + callerUgi(callerUgi). + operationName(opType). + callerContext(CallerContext.getCurrent()); + + // Add path to the context builder only if it is not null. + if (path != null && !path.isEmpty()) { + builder.path(path); + } + + return builder.build(); + } + + /** + * This method is retained to maintain backward compatibility. + * Please use the new method {@link #checkSuperuserPrivilege(String)} to make + * sure that the external enforcers have the correct context to audit. + * + * @throws AccessControlException if the caller is not a super user. + */ + public void checkSuperuserPrivilege() throws AccessControlException { + checkSuperuserPrivilege(null); + } + /** - * Verify if the caller has the required permission. This will result into - * an exception if the caller is not allowed to access the resource. + * Checks if the caller has super user privileges. + * Throws {@link AccessControlException} for non super users. + * + * @param path The resource path for which permission is being requested. + * @throws AccessControlException if the caller is not a super user. + */ + public void checkSuperuserPrivilege(String path) + throws AccessControlException { + if (LOG.isDebugEnabled()) { + LOG.debug("SUPERUSER ACCESS CHECK: " + this + + ", operationName=" + FSPermissionChecker.operationType.get() + + ", path=" + path); + } + getAccessControlEnforcer().checkSuperUserPermissionWithContext( + getAuthorizationContextForSuperUser(path)); + } + + /** + * Calls the external enforcer to notify denial of access to the user with + * the given error message. 
Always throws an ACE with the given message. + * + * @param path The resource path for which permission is being requested. + * @param errorMessage message for the exception. + * @throws AccessControlException with the error message. */ - public void checkSuperuserPrivilege() + public void denyUserAccess(String path, String errorMessage) throws AccessControlException { - if (!isSuperUser()) { - throw new AccessControlException("Access denied for user " - + getUser() + ". Superuser privilege is required"); + if (LOG.isDebugEnabled()) { + LOG.debug("DENY USER ACCESS: " + this + + ", operationName=" + FSPermissionChecker.operationType.get() + + ", path=" + path); } + getAccessControlEnforcer().denyUserAccess( + getAuthorizationContextForSuperUser(path), errorMessage); } - + /** * Check whether current user have permissions to access the path. * Traverse is always checked. @@ -705,6 +762,10 @@ static void checkTraverse(FSPermissionChecker pc, INodesInPath iip, UnresolvedPathException, ParentNotDirectoryException { try { if (pc == null || pc.isSuperUser()) { + if (pc != null) { + // call the external enforcer for audit + pc.checkSuperuserPrivilege(iip.getPath()); + } checkSimpleTraverse(iip); } else { pc.checkPermission(iip, false, null, null, null, null, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index e83c962a4a845..f5361adc4cff4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -362,7 +362,7 @@ public interface AccessControlEnforcer { * Checks permission on a file system object. Has to throw an Exception * if the filesystem object is not accessible by the calling Ugi. * @param fsOwner Filesystem owner (The Namenode user) - * @param supergroup super user geoup + * @param supergroup super user group * @param callerUgi UserGroupInformation of the caller * @param inodeAttrs Array of INode attributes for each path element in the * the path @@ -393,7 +393,7 @@ public abstract void checkPermission(String fsOwner, String supergroup, /** * Checks permission on a file system object. Has to throw an Exception - * if the filesystem object is not accessessible by the calling Ugi. + * if the filesystem object is not accessible by the calling Ugi. * @param authzContext an {@link AuthorizationContext} object encapsulating * the various parameters required to authorize an * operation. @@ -405,7 +405,48 @@ default void checkPermissionWithContext(AuthorizationContext authzContext) + "implement the checkPermissionWithContext(AuthorizationContext) " + "API."); } + + /** + * Checks if the user is a superuser or belongs to superuser group. + * It throws an AccessControlException if user is not a superuser. + * + * @param authzContext an {@link AuthorizationContext} object encapsulating + * the various parameters required to authorize an + * operation. + * @throws AccessControlException - if user is not a super user or part + * of the super user group. 
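For orientation before the two enforcer hooks whose declarations close just below: FSPermissionChecker (above) packages every superuser check into an AuthorizationContext via getAuthorizationContextForSuperUser before handing it to the enforcer. A self-contained sketch of what such a context looks like when assembled by hand; the literal owner, group and path values are illustrative, and in the namenode they come from the checker's own state:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AuthorizationContext;
import org.apache.hadoop.ipc.CallerContext;
import org.apache.hadoop.security.UserGroupInformation;

public class SuperuserContextSketch {
  public static AuthorizationContext build(String opName, String path)
      throws IOException {
    return new AuthorizationContext.Builder()
        .fsOwner("hdfs")                                  // namenode user (illustrative)
        .supergroup("supergroup")                         // dfs.permissions.superusergroup
        .callerUgi(UserGroupInformation.getCurrentUser()) // caller identity
        .operationName(opName)                            // e.g. "listOpenFiles"
        .callerContext(CallerContext.getCurrent())
        .path(path)                                       // only set when a path is known
        .build();
  }
}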
+ */ + default void checkSuperUserPermissionWithContext( + AuthorizationContext authzContext) + throws AccessControlException { + UserGroupInformation callerUgi = authzContext.getCallerUgi(); + boolean isSuperUser = + callerUgi.getShortUserName().equals(authzContext.getFsOwner()) || + callerUgi.getGroupsSet().contains(authzContext.getSupergroup()); + if (!isSuperUser) { + throw new AccessControlException("Access denied for user " + + callerUgi.getShortUserName() + ". Superuser privilege is " + + "required for operation " + authzContext.getOperationName()); + } + } + + /** + * This method must be called when denying access to users to + * notify the external enforcers. + * This will help the external enforcers to audit the requests + * by users that were denied access. + * @param authzContext an {@link AuthorizationContext} object encapsulating + * the various parameters required to authorize an + * operation. + * @throws AccessControlException + */ + default void denyUserAccess(AuthorizationContext authzContext, + String errorMessage) + throws AccessControlException { + throw new AccessControlException(errorMessage); + } } + /** * Initialize the provider. This method is called at NameNode startup * time. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 55196c4d44f03..ed3d80bbfc201 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1835,9 +1835,9 @@ public static void main(String argv[]) throws Exception { } } - synchronized void monitorHealth() - throws HealthCheckFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void monitorHealth() throws IOException { + String operationName = "monitorHealth"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { return; // no-op, if HA is not enabled } @@ -1859,9 +1859,9 @@ synchronized void monitorHealth() } } - synchronized void transitionToActive() - throws ServiceFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void transitionToActive() throws IOException { + String operationName = "transitionToActive"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } @@ -1876,18 +1876,18 @@ synchronized void transitionToActive() state.setState(haContext, ACTIVE_STATE); } - synchronized void transitionToStandby() - throws ServiceFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void transitionToStandby() throws IOException { + String operationName = "transitionToStandby"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } state.setState(haContext, STANDBY_STATE); } - synchronized void transitionToObserver() - throws ServiceFailedException, AccessControlException { - namesystem.checkSuperuserPrivilege(); + synchronized void transitionToObserver() throws IOException { + String operationName = "transitionToObserver"; + namesystem.checkSuperuserPrivilege(operationName); if (!haEnabled) { throw new ServiceFailedException("HA for namenode is not enabled"); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index aee4f68bdc7f2..90819c28ffc3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -642,6 +642,7 @@ private static UserGroupInformation getRemoteUser() throws IOException { public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long minBlockSize, long timeInterval) throws IOException { + String operationName = "getBlocks"; if(size <= 0) { throw new IllegalArgumentException( "Unexpected not positive size: "+size); @@ -651,15 +652,16 @@ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long "Unexpected not positive size: "+size); } checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); namesystem.checkNameNodeSafeMode("Cannot execute getBlocks"); return namesystem.getBlocks(datanode, size, minBlockSize, timeInterval); } @Override // NamenodeProtocol public ExportedBlockKeys getBlockKeys() throws IOException { + String operationName = "getBlockKeys"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getBlockManager().getBlockKeys(); } @@ -667,9 +669,10 @@ public ExportedBlockKeys getBlockKeys() throws IOException { public void errorReport(NamenodeRegistration registration, int errorCode, String msg) throws IOException { + String operationName = "errorReport"; checkNNStartup(); namesystem.checkOperation(OperationCategory.UNCHECKED); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); verifyRequest(registration); LOG.info("Error report from " + registration + ": " + msg); if (errorCode == FATAL) { @@ -680,8 +683,9 @@ public void errorReport(NamenodeRegistration registration, @Override // NamenodeProtocol public NamenodeRegistration registerSubordinateNamenode( NamenodeRegistration registration) throws IOException { + String operationName = "registerSubordinateNamenode"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); verifyLayoutVersion(registration.getVersion()); NamenodeRegistration myRegistration = nn.setRegistration(); namesystem.registerBackupNode(registration, myRegistration); @@ -691,8 +695,9 @@ public NamenodeRegistration registerSubordinateNamenode( @Override // NamenodeProtocol public NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws IOException { + String operationName = "startCheckpoint"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); verifyRequest(registration); if(!nn.isRole(NamenodeRole.NAMENODE)) throw new IOException("Only an ACTIVE node can invoke startCheckpoint."); @@ -714,8 +719,9 @@ public NamenodeCommand startCheckpoint(NamenodeRegistration registration) @Override // NamenodeProtocol public void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException { + String operationName = "endCheckpoint"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if 
(cacheEntry != null && cacheEntry.isSuccess()) { return; // Return previous response @@ -1322,17 +1328,19 @@ public void refreshNodes() throws IOException { @Override // NamenodeProtocol public long getTransactionID() throws IOException { + String operationName = "getTransactionID"; checkNNStartup(); namesystem.checkOperation(OperationCategory.UNCHECKED); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getCorrectLastAppliedOrWrittenTxId(); } @Override // NamenodeProtocol public long getMostRecentCheckpointTxId() throws IOException { + String operationName = "getMostRecentCheckpointTxId"; checkNNStartup(); namesystem.checkOperation(OperationCategory.UNCHECKED); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getMostRecentCheckpointTxId(); } @@ -1345,23 +1353,26 @@ public CheckpointSignature rollEditLog() throws IOException { @Override // NamenodeProtocol public RemoteEditLogManifest getEditLogManifest(long sinceTxId) throws IOException { + String operationName = "getEditLogManifest"; checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.getEditLog().getEditLogManifest(sinceTxId); } @Override // NamenodeProtocol public boolean isUpgradeFinalized() throws IOException { + String operationName = "isUpgradeFinalized"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.isUpgradeFinalized(); } @Override // NamenodeProtocol public boolean isRollingUpgrade() throws IOException { + String operationName = "isRollingUpgrade"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); return namesystem.isRollingUpgrade(); } @@ -2345,9 +2356,10 @@ public void checkAccess(String path, FsAction mode) throws IOException { @Override // ClientProtocol public long getCurrentEditLogTxid() throws IOException { + String operationName = "getCurrentEditLogTxid"; checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); // only active - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); // if it's not yet open for write, we may be in the process of transitioning // from standby to active and may not yet know what the latest committed // txid is @@ -2374,9 +2386,10 @@ private static FSEditLogOp readOp(EditLogInputStream elis) @Override // ClientProtocol public EventBatchList getEditsFromTxid(long txid) throws IOException { + String operationName = "getEditsFromTxid"; checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); // only active - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); int maxEventsPerRPC = nn.getConf().getInt( DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY, DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT); @@ -2521,8 +2534,9 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies( @Override public AddErasureCodingPolicyResponse[] addErasureCodingPolicies( ErasureCodingPolicy[] policies) throws IOException { + String operationName = "addErasureCodingPolicies"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null); if 
(cacheEntry != null && cacheEntry.isSuccess()) { @@ -2544,8 +2558,9 @@ public AddErasureCodingPolicyResponse[] addErasureCodingPolicies( @Override public void removeErasureCodingPolicy(String ecPolicyName) throws IOException { + String operationName = "removeErasureCodingPolicy"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; @@ -2562,8 +2577,9 @@ public void removeErasureCodingPolicy(String ecPolicyName) @Override // ClientProtocol public void enableErasureCodingPolicy(String ecPolicyName) throws IOException { + String operationName = "enableErasureCodingPolicy"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; @@ -2580,8 +2596,9 @@ public void enableErasureCodingPolicy(String ecPolicyName) @Override // ClientProtocol public void disableErasureCodingPolicy(String ecPolicyName) throws IOException { + String operationName = "disableErasureCodingPolicy"; checkNNStartup(); - namesystem.checkSuperuserPrivilege(); + namesystem.checkSuperuserPrivilege(operationName); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 3ec7d61859143..a39648979c939 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -377,9 +377,10 @@ private void printDatanodeReplicaStatus(Block block, */ public void fsck() throws AccessControlException { final long startTime = Time.monotonicNow(); + String operationName = "fsck"; try { if(blockIds != null) { - namenode.getNamesystem().checkSuperuserPrivilege(); + namenode.getNamesystem().checkSuperuserPrivilege(operationName, path); StringBuilder sb = new StringBuilder(); sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 92e5ef1a0b86d..2d3adb2975324 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -65,8 +65,14 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr, boolean isRawPath) throws AccessControlException { final boolean isSuperUser = pc.isSuperUser(); + final String xAttrString = + "XAttr [ns=" + xAttr.getNameSpace() + ", name=" + xAttr.getName() + "]"; if (xAttr.getNameSpace() == XAttr.NameSpace.USER || (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) { + if (isSuperUser) { + // call the external enforcer for audit. 
+ pc.checkSuperuserPrivilege(xAttrString); + } return; } if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) { @@ -75,14 +81,16 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr, if (XAttrHelper.getPrefixedName(xAttr). equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { if (xAttr.getValue() != null) { - throw new AccessControlException("Attempt to set a value for '" + + // Notify external enforcer for audit + String errorMessage = "Attempt to set a value for '" + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + - "'. Values are not allowed for this xattr."); + "'. Values are not allowed for this xattr."; + pc.denyUserAccess(xAttrString, errorMessage); } return; } - throw new AccessControlException("User doesn't have permission for xattr: " - + XAttrHelper.getPrefixedName(xAttr)); + pc.denyUserAccess(xAttrString, "User doesn't have permission for xattr: " + + XAttrHelper.getPrefixedName(xAttr)); } static void checkPermissionForApi(FSPermissionChecker pc, From db56be862808da2b6408900a0196b950e5e3e3f8 Mon Sep 17 00:00:00 2001 From: lfengnan Date: Thu, 15 Apr 2021 16:11:47 -0700 Subject: [PATCH 236/666] HDFS-15423 RBF: WebHDFS create shouldn't choose DN from all sub-clusters (#2903) --- .../router/RouterWebHdfsMethods.java | 47 +++++- .../router/web/RouterWebHDFSContract.java | 2 + .../federation/MiniRouterDFSCluster.java | 19 +-- .../router/TestRouterWebHdfsMethods.java | 147 ++++++++++++++++++ 4 files changed, 198 insertions(+), 17 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index f6ac70c368a52..afc4a3d8fac21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -93,6 +93,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,6 +105,8 @@ import java.util.HashSet; import java.util.List; import java.util.Random; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * WebHDFS Router implementation. 
This is an extension of @@ -453,21 +456,33 @@ private DatanodeInfo chooseDatanode(final Router router, final String path, final HttpOpParam.Op op, final long openOffset, final String excludeDatanodes) throws IOException { final RouterRpcServer rpcServer = getRPCServer(router); - DatanodeInfo[] dns = null; + DatanodeInfo[] dns = {}; + String resolvedNs = ""; try { dns = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE); } catch (IOException e) { LOG.error("Cannot get the datanodes from the RPC server", e); } + if (op == PutOpParam.Op.CREATE) { + try { + resolvedNs = rpcServer.getCreateLocation(path).getNameserviceId(); + } catch (IOException e) { + LOG.error("Cannot get the name service " + + "to create file for path {} ", path, e); + } + } + HashSet excludes = new HashSet(); - if (excludeDatanodes != null) { - Collection collection = - getTrimmedStringCollection(excludeDatanodes); - for (DatanodeInfo dn : dns) { - if (collection.contains(dn.getName())) { - excludes.add(dn); - } + Collection collection = + getTrimmedStringCollection(excludeDatanodes); + for (DatanodeInfo dn : dns) { + String ns = getNsFromDataNodeNetworkLocation(dn.getNetworkLocation()); + if (collection.contains(dn.getName())) { + excludes.add(dn); + } else if (op == PutOpParam.Op.CREATE && !ns.equals(resolvedNs)) { + // for CREATE, the dest dn should be in the resolved ns + excludes.add(dn); } } @@ -502,6 +517,22 @@ private DatanodeInfo chooseDatanode(final Router router, return getRandomDatanode(dns, excludes); } + /** + * Get the nameservice info from datanode network location. + * @param location network location with format `/ns0/rack1` + * @return nameservice this datanode is in + */ + @VisibleForTesting + public static String getNsFromDataNodeNetworkLocation(String location) { + // network location should be in the format of /ns/rack + Pattern pattern = Pattern.compile("^/([^/]*)/"); + Matcher matcher = pattern.matcher(location); + if (matcher.find()) { + return matcher.group(1); + } + return ""; + } + /** * Get a random Datanode from a subcluster. * @param dns Nodes to be chosen from. 
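The change above boils down to two steps for a WebHDFS CREATE: resolve which nameservice the destination path maps to, then keep only datanodes whose network location (of the form /ns/rack) is rooted in that nameservice. What follows is a minimal, self-contained sketch of that selection logic, not the Router code itself; the class name NsFilterSketch and the helper candidatesForCreate are illustrative stand-ins, while the regular expression is the one added to RouterWebHdfsMethods above.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Standalone illustration of nameservice-aware datanode filtering.
    public final class NsFilterSketch {

      // A network location is assumed to look like "/ns0/rack1"; the first
      // path component is read as the nameservice id, or "" if there is none.
      private static final Pattern NS_PREFIX = Pattern.compile("^/([^/]*)/");

      static String nsOf(String networkLocation) {
        Matcher m = NS_PREFIX.matcher(networkLocation);
        return m.find() ? m.group(1) : "";
      }

      // Keep only the locations that belong to the resolved nameservice.
      static List<String> candidatesForCreate(List<String> locations, String resolvedNs) {
        List<String> result = new ArrayList<>();
        for (String location : locations) {
          if (nsOf(location).equals(resolvedNs)) {
            result.add(location);
          }
        }
        return result;
      }

      public static void main(String[] args) {
        List<String> locations = List.of(
            "/ns0/rack1", "/ns1/rack1", "/ns0/row1/rack2", "/default-rack");
        // Prints [/ns0/rack1, /ns0/row1/rack2]: only the ns0 datanodes remain.
        System.out.println(candidatesForCreate(locations, "ns0"));
      }
    }

The exclude set built in chooseDatanode plays the same role as the filtering above, except that it also keeps honouring the caller-supplied excludeDatanodes parameter.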
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java index 1d308073290d4..6b90faecc78f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/RouterWebHDFSContract.java @@ -64,6 +64,8 @@ public static void createCluster(Configuration conf) throws IOException { conf.addResource(CONTRACT_WEBHDFS_XML); cluster = new MiniRouterDFSCluster(true, 2, conf); + cluster.setIndependentDNs(); + cluster.setNumDatanodesPerNameservice(3); // Start NNs and DNs and wait until ready cluster.startCluster(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 896d08f2c49b6..8a7a03e018b95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -774,6 +774,15 @@ public void startCluster(Configuration overrideConf) { } topology.setFederation(true); + // Generate conf for namenodes and datanodes + String ns0 = nameservices.get(0); + Configuration nnConf = generateNamenodeConfiguration(ns0); + if (overrideConf != null) { + nnConf.addResource(overrideConf); + // Router also uses this configurations as initial values. + routerConf = new Configuration(overrideConf); + } + // Set independent DNs across subclusters int numDNs = nameservices.size() * numDatanodesPerNameservice; Configuration[] dnConfs = null; @@ -781,7 +790,7 @@ public void startCluster(Configuration overrideConf) { dnConfs = new Configuration[numDNs]; int dnId = 0; for (String nsId : nameservices) { - Configuration subclusterConf = new Configuration(); + Configuration subclusterConf = new Configuration(nnConf); subclusterConf.set(DFS_INTERNAL_NAMESERVICES_KEY, nsId); for (int i = 0; i < numDatanodesPerNameservice; i++) { dnConfs[dnId] = subclusterConf; @@ -791,14 +800,6 @@ public void startCluster(Configuration overrideConf) { } // Start mini DFS cluster - String ns0 = nameservices.get(0); - Configuration nnConf = generateNamenodeConfiguration(ns0); - if (overrideConf != null) { - nnConf.addResource(overrideConf); - // Router also uses this configurations as initial values. - routerConf = new Configuration(overrideConf); - } - cluster = new MiniDFSCluster.Builder(nnConf) .numDataNodes(numDNs) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java new file mode 100644 index 0000000000000..7028928041452 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createMountTableEntry; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Collections; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test suite for Router Web Hdfs methods. + */ +public class TestRouterWebHdfsMethods { + static final Logger LOG = + LoggerFactory.getLogger(TestRouterWebHdfsMethods.class); + + private static StateStoreDFSCluster cluster; + private static RouterContext router; + private static String httpUri; + + @BeforeClass + public static void globalSetUp() throws Exception { + cluster = new StateStoreDFSCluster(false, 2); + Configuration conf = new RouterConfigBuilder() + .stateStore() + .rpc() + .http() + .admin() + .build(); + cluster.addRouterOverrides(conf); + cluster.startCluster(); + cluster.startRouters(); + cluster.waitClusterUp(); + router = cluster.getRandomRouter(); + httpUri = "http://"+router.getHttpAddress(); + } + + @AfterClass + public static void tearDown() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Test + public void testWebHdfsCreate() throws Exception { + // the file is created at default ns (ns0) + String path = "/tmp/file"; + URL url = new URL(getUri(path)); + LOG.info("URL: {}", url); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("PUT"); + assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); + verifyFile("ns0", path, true); + verifyFile("ns1", path, false); + conn.disconnect(); + } + + @Test + public void testWebHdfsCreateWithMounts() throws Exception { + // the file is created at mounted ns (ns1) + String mountPoint = "/tmp-ns1"; + String path = "/tmp-ns1/file"; + createMountTableEntry( + router.getRouter(), mountPoint, + DestinationOrder.RANDOM, Collections.singletonList("ns1")); + URL url = new URL(getUri(path)); + LOG.info("URL: {}", url); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("PUT"); + assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); + verifyFile("ns1", path, true); 
+ verifyFile("ns0", path, false); + conn.disconnect(); + } + + private String getUri(String path) { + final String user = System.getProperty("user.name"); + final StringBuilder uri = new StringBuilder(httpUri); + uri.append("/webhdfs/v1"). + append(path). + append("?op=CREATE"). + append("&user.name=" + user); + return uri.toString(); + } + + private void verifyFile(String ns, String path, boolean shouldExist) + throws Exception { + FileSystem fs = cluster.getNamenode(ns, null).getFileSystem(); + try { + fs.getFileStatus(new Path(path)); + if (!shouldExist) { + fail(path + " should not exist in ns " + ns); + } + } catch (FileNotFoundException e) { + if (shouldExist) { + fail(path + " should exist in ns " + ns); + } + } + } + + @Test + public void testGetNsFromDataNodeNetworkLocation() { + assertEquals("ns0", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("/ns0/rack-info1")); + assertEquals("ns0", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("/ns0/row1/rack-info1")); + assertEquals("", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("/row0")); + assertEquals("", RouterWebHdfsMethods + .getNsFromDataNodeNetworkLocation("whatever-rack-info1")); + } +} From b6612792da8c73912436343476c821923430011f Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 16 Apr 2021 13:26:45 +0900 Subject: [PATCH 237/666] HDFS-15977. Call explicit_bzero only if it is available. (#2914) Reviewed-by: Masatake Iwasaki Reviewed-by: Inigo Goiri --- .../src/main/native/libhdfspp/CMakeLists.txt | 6 ++++++ .../main/native/libhdfspp/lib/x-platform/syscall_linux.cc | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt index b03ba68172228..d23f84941db9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt @@ -51,6 +51,7 @@ find_package(GSasl) find_package(Threads) include(CheckCXXSourceCompiles) +include(CheckSymbolExists) # Download and build gtest configure_file(CMakeLists-gtest.txt.in googletest-download/CMakeLists.txt) @@ -168,6 +169,11 @@ else (NOT NO_SASL) message(STATUS "Compiling with NO SASL SUPPORT") endif (NOT NO_SASL) +check_symbol_exists(explicit_bzero "string.h" HAVE_EXPLICIT_BZERO) +if(HAVE_EXPLICIT_BZERO) + add_definitions(-DHAVE_EXPLICIT_BZERO) +endif() + add_definitions(-DASIO_STANDALONE -DASIO_CPP11_DATE_TIME) # Disable optimizations if compiling debug diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc index 9903bbe853ba3..ff02d2fa65cf5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc @@ -47,7 +47,12 @@ bool XPlatform::Syscall::WriteToStdoutImpl(const char* message) { void XPlatform::Syscall::ClearBufferSafely(void* buffer, const size_t sz_bytes) { if (buffer != nullptr) { +#ifdef HAVE_EXPLICIT_BZERO explicit_bzero(buffer, sz_bytes); +#else + // fallback to bzero + bzero(buffer, sz_bytes); +#endif } } From 1f1b930cf4c9bcd675f9b7ceb9a5751cb9c7d3e5 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 16 Apr 2021 
12:36:01 +0530 Subject: [PATCH 238/666] HADOOP-17633. Bump json-smart to 2.4.2 and nimbus-jose-jwt to 9.8 due to CVEs (#2895). Contributed by Viraj Jasani. Signed-off-by: Ayush Saxena --- LICENSE-binary | 4 ++-- hadoop-project/pom.xml | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/LICENSE-binary b/LICENSE-binary index 198f97c584393..5af4b60f6cf01 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -240,7 +240,7 @@ com.google.guava:guava:20.0 com.google.guava:guava:27.0-jre com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava com.microsoft.azure:azure-storage:7.0.0 -com.nimbusds:nimbus-jose-jwt:4.41.1 +com.nimbusds:nimbus-jose-jwt:9.8.1 com.squareup.okhttp:okhttp:2.7.5 com.squareup.okio:okio:1.6.0 com.zaxxer:HikariCP-java7:2.4.12 @@ -283,7 +283,7 @@ javax.inject:javax.inject:1 log4j:log4j:1.2.17 net.java.dev.jna:jna:5.2.0 net.minidev:accessors-smart:1.2 -net.minidev:json-smart:2.3 +net.minidev:json-smart:2.4.2 org.apache.avro:avro:1.7.7 org.apache.commons:commons-collections4:4.2 org.apache.commons:commons-compress:1.19 diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index aeee30ab9433d..173749eeeadd1 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -210,6 +210,8 @@ 1.0.7.Final 1.0.2 5.3.0 + 2.4.2 + 9.8.1 @@ -1566,7 +1568,7 @@ com.nimbusds nimbus-jose-jwt - 7.9 + ${nimbus-jose-jwt.version} compile @@ -1589,7 +1591,7 @@ --> net.minidev json-smart - 2.3 + ${json-smart.version} org.skyscreamer From a0a2860c79bd1d1d22b4ec91f3b3436c442009ab Mon Sep 17 00:00:00 2001 From: zhangshuyan0 <81411509+zhangshuyan0@users.noreply.github.com> Date: Sat, 17 Apr 2021 00:08:31 +0800 Subject: [PATCH 239/666] HDFS-15963. Unreleased volume references cause an infinite loop. (#2889) Contributed by Shuyan Zhang. 
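The root cause is a reference-counting leak: a task that owns an FsVolumeReference is handed to an executor, and when the executor rejects it (typically because the service is already shut down) nobody releases the reference, so the volume's holder count never drops back and shutdown spins forever. The snippet below is a minimal, hypothetical sketch of the release-on-every-path pattern the fix applies; VolumeRef and submitOwningRef are illustrative stand-ins, not Hadoop classes.

    import java.io.Closeable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.atomic.AtomicInteger;

    // Standalone illustration of releasing a counted reference on every path.
    public final class VolumeRefSketch {

      // Stand-in for FsVolumeReference: tracks how many holders remain.
      static final class VolumeRef implements Closeable {
        private final AtomicInteger holders = new AtomicInteger(1);
        int holders() { return holders.get(); }
        @Override public void close() { holders.decrementAndGet(); }
      }

      // The task owns the reference. It must be released both when the task
      // finishes and when the executor refuses to run it at all.
      static void submitOwningRef(ExecutorService executor, VolumeRef ref, Runnable work) {
        try {
          executor.execute(() -> {
            try {
              work.run();
            } finally {
              ref.close();   // normal path: release once the work is done
            }
          });
        } catch (RejectedExecutionException e) {
          ref.close();       // failure path: release because the task never ran
          throw e;
        }
      }

      public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.shutdown();                 // mimics an already shut down disk service
        VolumeRef ref = new VolumeRef();
        try {
          submitOwningRef(executor, ref, () -> { });
        } catch (RejectedExecutionException expected) {
          // expected here; the point is that the reference was still released
        }
        System.out.println("holders after rejection: " + ref.holders());  // prints 0
      }
    }

In the actual change the same idea shows up twice: the run() methods release their reference in a finally block, and execute() catches RuntimeException, cleans up the task's volume reference, and rethrows.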
Reviewed-by: Wei-Chiu Chuang Reviewed-by: He Xiaoqiao --- .../hdfs/server/datanode/BlockSender.java | 1 + .../impl/FsDatasetAsyncDiskService.java | 71 +++++++++++-------- .../datanode/fsdataset/impl/FsVolumeImpl.java | 2 +- .../impl/RamDiskAsyncLazyPersistService.java | 29 +++++--- .../hadoop/hdfs/TestDataTransferProtocol.java | 55 ++++++++++++++ .../fsdataset/impl/TestFsDatasetImpl.java | 34 +++++++++ .../fsdataset/impl/TestLazyPersistFiles.java | 37 ++++++++++ 7 files changed, 188 insertions(+), 41 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index fb6d83fcc6bd6..bb75e3aceb6be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -432,6 +432,7 @@ class BlockSender implements java.io.Closeable { ris = new ReplicaInputStreams( blockIn, checksumIn, volumeRef, fileIoProvider); } catch (IOException ioe) { + IOUtils.cleanupWithLogger(null, volumeRef); IOUtils.closeStream(this); org.apache.commons.io.IOUtils.closeQuietly(blockIn); org.apache.commons.io.IOUtils.closeQuietly(checksumIn); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index 2a89a80d17a25..706c078e648b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -167,18 +167,26 @@ synchronized long countPendingDeletions() { * Execute the task sometime in the future, using ThreadPools. 
*/ synchronized void execute(FsVolumeImpl volume, Runnable task) { - if (executors == null) { - throw new RuntimeException("AsyncDiskService is already shutdown"); - } - if (volume == null) { - throw new RuntimeException("A null volume does not have a executor"); - } - ThreadPoolExecutor executor = executors.get(volume.getStorageID()); - if (executor == null) { - throw new RuntimeException("Cannot find volume " + volume - + " for execution of task " + task); - } else { - executor.execute(task); + try { + if (executors == null) { + throw new RuntimeException("AsyncDiskService is already shutdown"); + } + if (volume == null) { + throw new RuntimeException("A null volume does not have a executor"); + } + ThreadPoolExecutor executor = executors.get(volume.getStorageID()); + if (executor == null) { + throw new RuntimeException("Cannot find volume " + volume + + " for execution of task " + task); + } else { + executor.execute(task); + } + } catch (RuntimeException re) { + if (task instanceof ReplicaFileDeleteTask) { + IOUtils.cleanupWithLogger(null, + ((ReplicaFileDeleteTask) task).volumeRef); + } + throw re; } } @@ -314,28 +322,31 @@ private boolean moveFiles() { @Override public void run() { - final long blockLength = replicaToDelete.getBlockDataLength(); - final long metaLength = replicaToDelete.getMetadataLength(); - boolean result; + try { + final long blockLength = replicaToDelete.getBlockDataLength(); + final long metaLength = replicaToDelete.getMetadataLength(); + boolean result; - result = (trashDirectory == null) ? deleteFiles() : moveFiles(); + result = (trashDirectory == null) ? deleteFiles() : moveFiles(); - if (!result) { - LOG.warn("Unexpected error trying to " - + (trashDirectory == null ? "delete" : "move") - + " block " + block.getBlockPoolId() + " " + block.getLocalBlock() - + " at file " + replicaToDelete.getBlockURI() + ". Ignored."); - } else { - if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){ - datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID()); + if (!result) { + LOG.warn("Unexpected error trying to " + + (trashDirectory == null ? "delete" : "move") + + " block " + block.getBlockPoolId() + " " + block.getLocalBlock() + + " at file " + replicaToDelete.getBlockURI() + ". 
Ignored."); + } else { + if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) { + datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID()); + } + volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength); + volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength); + LOG.info("Deleted " + block.getBlockPoolId() + " " + + block.getLocalBlock() + " URI " + replicaToDelete.getBlockURI()); } - volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength); - volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength); - LOG.info("Deleted " + block.getBlockPoolId() + " " - + block.getLocalBlock() + " URI " + replicaToDelete.getBlockURI()); + updateDeletedBlockId(block); + } finally { + IOUtils.cleanupWithLogger(null, this.volumeRef); } - updateDeletedBlockId(block); - IOUtils.cleanupWithLogger(null, volumeRef); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 07e14fb04e48e..6681f6fd64c0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -319,7 +319,7 @@ private void checkReference() { } @VisibleForTesting - int getReferenceCount() { + public int getReferenceCount() { return this.reference.getReferenceCount(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java index a77faf2cec8bc..0d42ae99e358e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; +import org.apache.hadoop.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -153,16 +154,24 @@ synchronized boolean queryVolume(FsVolumeImpl volume) { * Execute the task sometime in the future, using ThreadPools. 
*/ synchronized void execute(String storageId, Runnable task) { - if (executors == null) { - throw new RuntimeException( - "AsyncLazyPersistService is already shutdown"); - } - ThreadPoolExecutor executor = executors.get(storageId); - if (executor == null) { - throw new RuntimeException("Cannot find root storage volume with id " + - storageId + " for execution of task " + task); - } else { - executor.execute(task); + try { + if (executors == null) { + throw new RuntimeException( + "AsyncLazyPersistService is already shutdown"); + } + ThreadPoolExecutor executor = executors.get(storageId); + if (executor == null) { + throw new RuntimeException("Cannot find root storage volume with id " + + storageId + " for execution of task " + task); + } else { + executor.execute(task); + } + } catch (RuntimeException re) { + if (task instanceof ReplicaLazyPersistTask) { + IOUtils.cleanupWithLogger(null, + ((ReplicaLazyPersistTask) task).targetVolume); + } + throw re; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index b9da5f446f92f..b1a675c77b62a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -33,6 +33,9 @@ import java.nio.ByteBuffer; import java.util.Random; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -562,4 +565,56 @@ void writeBlock(ExtendedBlock block, BlockConstructionStage stage, checksum, CachingStrategy.newDefaultStrategy(), false, false, null, null, new String[0]); } + + @Test(timeout = 30000) + public void testReleaseVolumeRefIfExceptionThrown() + throws IOException, InterruptedException { + Path file = new Path("dataprotocol.dat"); + int numDataNodes = 1; + + Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes( + numDataNodes).build(); + try { + cluster.waitActive(); + datanode = cluster.getFileSystem().getDataNodeStats( + DatanodeReportType.LIVE)[0]; + dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr()); + FileSystem fileSys = cluster.getFileSystem(); + + int fileLen = Math.min( + conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096); + + DFSTestUtil.createFile(fileSys, file, fileLen, fileLen, + fileSys.getDefaultBlockSize(file), + fileSys.getDefaultReplication(file), 0L); + + // Get the first blockid for the file. + final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file); + + String bpid = cluster.getNamesystem().getBlockPoolId(); + ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.getLocalBlock()); + sendBuf.reset(); + recvBuf.reset(); + + // Delete the meta file to create a exception in BlockSender constructor. 
+ DataNode dn = cluster.getDataNodes().get(0); + cluster.getMaterializedReplica(0, blk).deleteMeta(); + + FsVolumeImpl volume = (FsVolumeImpl) DataNodeTestUtils.getFSDataset( + dn).getVolume(blk); + int beforeCnt = volume.getReferenceCount(); + + sender.copyBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN); + sendRecvData("Copy a block.", false); + Thread.sleep(3000); + + int afterCnt = volume.getReferenceCount(); + assertEquals(beforeCnt, afterCnt); + + } finally { + cluster.shutdown(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 6ae6248d3f997..778ef97180b41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.DF; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner; +import org.apache.hadoop.hdfs.server.datanode.LocalReplica; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.OutputStream; @@ -1805,4 +1806,37 @@ public void testNotifyNamenodeMissingOrNewBlock() throws Exception { cluster.shutdown(); } } + + @Test(timeout = 20000) + public void testReleaseVolumeRefIfExceptionThrown() throws IOException { + MiniDFSCluster cluster = new MiniDFSCluster.Builder( + new HdfsConfiguration()).build(); + cluster.waitActive(); + FsVolumeImpl vol = (FsVolumeImpl) dataset.getFsVolumeReferences().get(0); + ExtendedBlock eb; + ReplicaInfo info; + int beforeCnt = 0; + try { + List blockList = new ArrayList(); + eb = new ExtendedBlock(BLOCKPOOL, 1, 1, 1001); + info = new FinalizedReplica( + eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile()); + dataset.volumeMap.add(BLOCKPOOL, info); + ((LocalReplica) info).getBlockFile().createNewFile(); + ((LocalReplica) info).getMetaFile().createNewFile(); + blockList.add(info); + + // Create a runtime exception. 
+ dataset.asyncDiskService.shutdown(); + + beforeCnt = vol.getReferenceCount(); + dataset.invalidate(BLOCKPOOL, blockList.toArray(new Block[0])); + + } catch (RuntimeException re) { + int afterCnt = vol.getReferenceCount(); + assertEquals(beforeCnt, afterCnt); + } finally { + cluster.shutdown(); + } + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index c0b4b17ea9566..14ed26e9b5544 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ThreadUtil; import org.junit.Assert; @@ -280,4 +283,38 @@ public void run() { } } } + + @Test(timeout = 20000) + public void testReleaseVolumeRefIfExceptionThrown() + throws IOException, InterruptedException { + getClusterBuilder().setRamDiskReplicaCapacity(2).build(); + final String methodName = GenericTestUtils.getMethodName(); + final int seed = 0xFADED; + Path path = new Path("/" + methodName + ".Writer.File.dat"); + + DataNode dn = cluster.getDataNodes().get(0); + FsDatasetSpi.FsVolumeReferences volumes = + DataNodeTestUtils.getFSDataset(dn).getFsVolumeReferences(); + int[] beforeCnts = new int[volumes.size()]; + FsDatasetImpl ds = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn); + + // Create a runtime exception. + ds.asyncLazyPersistService.shutdown(); + for (int i = 0; i < volumes.size(); ++i) { + beforeCnts[i] = ((FsVolumeImpl) volumes.get(i)).getReferenceCount(); + } + + makeRandomTestFile(path, BLOCK_SIZE, true, seed); + Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000); + + for (int i = 0; i < volumes.size(); ++i) { + int afterCnt = ((FsVolumeImpl) volumes.get(i)).getReferenceCount(); + // LazyWriter keeps trying to save copies even if + // asyncLazyPersistService is already shutdown. + // If we do not release references, the number of + // references will increase infinitely. + Assert.assertTrue( + beforeCnts[i] == afterCnt || beforeCnts[i] == (afterCnt - 1)); + } + } } From 93453167ee801f92eb8757ba57dc6dfb8429a1db Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Fri, 16 Apr 2021 17:15:33 +0000 Subject: [PATCH 240/666] Revert "HDFS-15971. Make mkstemp cross platform (#2898)" This reverts commit b088f46b684ccefb401b9a3be197d75e3bc64f6b. 
--- .../src/CMakeLists.txt | 3 +- .../libhdfs-tests/test_libhdfs_mini_stress.c | 4 +- .../libhdfspp/lib/x-platform/c-api/syscall.cc | 22 +------ .../libhdfspp/lib/x-platform/c-api/syscall.h | 3 - .../native/libhdfspp/lib/x-platform/syscall.h | 29 --------- .../libhdfspp/lib/x-platform/syscall_linux.cc | 11 ---- .../lib/x-platform/syscall_windows.cc | 30 --------- .../native/libhdfspp/tests/CMakeLists.txt | 13 ++-- .../libhdfspp/tests/configuration_test.cc | 23 +++---- .../libhdfspp/tests/configuration_test.h | 64 ++++--------------- .../libhdfspp/tests/hdfs_builder_test.cc | 4 +- .../tests/hdfs_configuration_test.cc | 16 ++--- .../tests/x-platform/syscall_common_test.cc | 18 ------ 13 files changed, 41 insertions(+), 199 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt index d8c2012fb88de..7c076bd790085 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt @@ -91,8 +91,7 @@ function(build_libhdfs_test NAME LIBRARY) list(APPEND FILES ${CMAKE_SOURCE_DIR}/main/native/libhdfs-tests/${FIL}) endif() endforeach() - add_executable("${NAME}_${LIBRARY}" $ $ ${FILES}) - target_include_directories("${NAME}_${LIBRARY}" PRIVATE main/native/libhdfspp/lib) + add_executable("${NAME}_${LIBRARY}" ${FILES}) endfunction() function(add_libhdfs_test NAME LIBRARY) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c index 1641470733f2c..846852bfd0e88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c @@ -22,7 +22,6 @@ #include "hdfspp/hdfs_ext.h" #include "native_mini_dfs.h" #include "os/thread.h" -#include "x-platform/c-api/syscall.h" #include #include @@ -127,8 +126,7 @@ static int hdfsCurlData(const char *host, const tPort port, const char *dirNm, EXPECT_NONNULL(pw = getpwuid(uid)); int fd = -1; - EXPECT_NONNEGATIVE(fd = x_platform_syscall_create_and_open_temp_file( - tmpFile, sizeof tmpFile)); + EXPECT_NONNEGATIVE(fd = mkstemp(tmpFile)); tSize sz = 0; while (sz < fileSz) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc index 0bb5fc15ef631..cca9f6a024807 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.cc @@ -18,26 +18,6 @@ #include "x-platform/syscall.h" -#include -#include - -extern "C" { -int x_platform_syscall_write_to_stdout(const char* msg) { +extern "C" int x_platform_syscall_write_to_stdout(const char* msg) { return XPlatform::Syscall::WriteToStdout(msg) ? 
1 : 0; } - -int x_platform_syscall_create_and_open_temp_file(char* pattern, - const size_t pattern_len) { - std::vector pattern_vec(pattern, pattern + pattern_len); - - const auto fd = XPlatform::Syscall::CreateAndOpenTempFile(pattern_vec); - if (fd != -1) { - std::copy_n(pattern_vec.begin(), pattern_len, pattern); - } - return fd; -} - -int x_platform_syscall_close_file(const int fd) { - return XPlatform::Syscall::CloseFile(fd); -} -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h index 93878b144db98..be905ae1d364e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/c-api/syscall.h @@ -24,8 +24,5 @@ */ int x_platform_syscall_write_to_stdout(const char* msg); -int x_platform_syscall_create_and_open_temp_file(char* pattern, - size_t pattern_len); -int x_platform_syscall_close_file(int fd); #endif // NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_C_API_SYSCALL_H diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h index 9959f215b2068..3a77f2dd6d026 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall.h @@ -20,7 +20,6 @@ #define NATIVE_LIBHDFSPP_LIB_CROSS_PLATFORM_SYSCALL #include -#include /** * The {@link XPlatform} namespace contains components that @@ -85,34 +84,6 @@ class Syscall { static bool StringCompareIgnoreCase(const std::string& a, const std::string& b); - /** - * Creates and opens a temporary file with a given {@link pattern}. - * The {@link pattern} must end with a minimum of 6 'X' characters. - * This function will first modify the last 6 'X' characters with - * random character values, which serve as the temporary file name. - * Subsequently opens the file and returns the file descriptor for - * the same. The behaviour of this function is the same as that of - * POSIX mkstemp function. The file must be later closed by the - * application and is not handled by this function. - * - * @param pattern the pattern to be used for the temporary filename. - * @returns an integer representing the file descriptor for the - * opened temporary file. Returns -1 in the case of error and sets - * the global errno with the appropriate error code. - */ - static int CreateAndOpenTempFile(std::vector& pattern); - - /** - * Closes the file corresponding to given {@link file_descriptor}. - * - * @param file_descriptor the file descriptor of the file to close. - * @returns a boolean indicating the status of the call to this - * function. true if it's a success, false in the case of an error. - * The global errno is set if the call to this function was not - * successful. 
- */ - static bool CloseFile(int file_descriptor); - private: static bool WriteToStdoutImpl(const char* message); }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc index ff02d2fa65cf5..59d93c40b7681 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_linux.cc @@ -21,7 +21,6 @@ #include #include -#include #include "syscall.h" @@ -60,13 +59,3 @@ bool XPlatform::Syscall::StringCompareIgnoreCase(const std::string& a, const std::string& b) { return strcasecmp(a.c_str(), b.c_str()) == 0; } - -int XPlatform::Syscall::CreateAndOpenTempFile(std::vector& pattern) { - // Make space for mkstemp to add NULL character at the end - pattern.resize(pattern.size() + 1); - return mkstemp(pattern.data()); -} - -bool XPlatform::Syscall::CloseFile(const int file_descriptor) { - return close(file_descriptor) == 0; -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc index b5ddd04373841..2cd9e9d5157df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/x-platform/syscall_windows.cc @@ -19,14 +19,7 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include #include #include "syscall.h" @@ -71,26 +64,3 @@ bool XPlatform::Syscall::StringCompareIgnoreCase(const std::string& a, const std::string& b) { return _stricmp(a.c_str(), b.c_str()) == 0; } - -int XPlatform::Syscall::CreateAndOpenTempFile(std::vector& pattern) { - if (_set_errno(0) != 0) { - return -1; - } - - // Make space for _mktemp_s to add NULL character at the end - pattern.resize(pattern.size() + 1); - if (_mktemp_s(pattern.data(), pattern.size()) != 0) { - return -1; - } - - auto fd{-1}; - if (_sopen_s(&fd, pattern.data(), _O_RDWR | _O_CREAT | _O_EXCL, _SH_DENYNO, - _S_IREAD | _S_IWRITE) != 0) { - return -1; - } - return fd; -} - -bool XPlatform::Syscall::CloseFile(const int file_descriptor) { - return _close(file_descriptor) == 0; -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt index d37fcc29f9cbd..32f75f474b8d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt @@ -96,13 +96,11 @@ add_executable(node_exclusion_test node_exclusion_test.cc) target_link_libraries(node_exclusion_test fs gmock_main common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(node_exclusion node_exclusion_test) -add_executable(configuration_test $ configuration_test.cc) -target_include_directories(configuration_test PRIVATE ../lib) +add_executable(configuration_test configuration_test.cc) target_link_libraries(configuration_test common gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(configuration configuration_test) 
-add_executable(hdfs_configuration_test $ hdfs_configuration_test.cc) -target_include_directories(hdfs_configuration_test PRIVATE ../lib) +add_executable(hdfs_configuration_test hdfs_configuration_test.cc) target_link_libraries(hdfs_configuration_test common gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_configuration hdfs_configuration_test) @@ -110,13 +108,11 @@ add_executable(hdfspp_errors_test hdfspp_errors.cc) target_link_libraries(hdfspp_errors_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfspp_errors hdfspp_errors_test) -add_executable(hdfs_builder_test $ hdfs_builder_test.cc) -target_include_directories(hdfs_builder_test PRIVATE ../lib) +add_executable(hdfs_builder_test hdfs_builder_test.cc) target_link_libraries(hdfs_builder_test test_common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_builder_test hdfs_builder_test) add_executable(logging_test logging_test.cc $) -target_include_directories(logging_test PRIVATE ../lib) target_link_libraries(logging_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} gmock_main ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(logging_test logging_test) @@ -128,8 +124,7 @@ add_executable(user_lock_test user_lock_test.cc) target_link_libraries(user_lock_test fs gmock_main common ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(user_lock user_lock_test) -add_executable(hdfs_config_connect_bugs_test $ hdfs_config_connect_bugs.cc) -target_include_directories(hdfs_config_connect_bugs_test PRIVATE ../lib) +add_executable(hdfs_config_connect_bugs_test hdfs_config_connect_bugs.cc) target_link_libraries(hdfs_config_connect_bugs_test common gmock_main bindings_c fs rpc proto common reader connection ${PROTOBUF_LIBRARIES} ${OPENSSL_LIBRARIES} ${SASL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_memcheck_test(hdfs_config_connect_bugs hdfs_config_connect_bugs_test) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc index 3bf2524354b6b..9534204c92ca0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.cc @@ -299,12 +299,11 @@ TEST(ConfigurationTest, TestFileReads) // Single stream { TempFile tempFile; - writeSimpleConfig(tempFile.GetFileName(), "key1", "value1"); + writeSimpleConfig(tempFile.filename, "key1", "value1"); ConfigurationLoader config_loader; config_loader.ClearSearchPath(); - optional config = - config_loader.LoadFromFile(tempFile.GetFileName()); + optional config = config_loader.LoadFromFile(tempFile.filename); EXPECT_TRUE(config && "Parse first stream"); EXPECT_EQ("value1", config->GetWithDefault("key1", "")); } @@ -312,18 +311,16 @@ TEST(ConfigurationTest, TestFileReads) // Multiple files { TempFile tempFile; - writeSimpleConfig(tempFile.GetFileName(), "key1", "value1"); + writeSimpleConfig(tempFile.filename, "key1", "value1"); ConfigurationLoader loader; - optional config = - 
loader.LoadFromFile(tempFile.GetFileName()); + optional config = loader.LoadFromFile(tempFile.filename); ASSERT_TRUE(config && "Parse first stream"); EXPECT_EQ("value1", config->GetWithDefault("key1", "")); TempFile tempFile2; - writeSimpleConfig(tempFile2.GetFileName(), "key2", "value2"); - optional config2 = - loader.OverlayResourceFile(*config, tempFile2.GetFileName()); + writeSimpleConfig(tempFile2.filename, "key2", "value2"); + optional config2 = loader.OverlayResourceFile(*config, tempFile2.filename); ASSERT_TRUE(config2 && "Parse second stream"); EXPECT_EQ("value1", config2->GetWithDefault("key1", "")); EXPECT_EQ("value2", config2->GetWithDefault("key2", "")); @@ -353,13 +350,13 @@ TEST(ConfigurationTest, TestFileReads) { TempDir tempDir1; TempFile tempFile1(tempDir1.path + "/file1.xml"); - writeSimpleConfig(tempFile1.GetFileName(), "key1", "value1"); + writeSimpleConfig(tempFile1.filename, "key1", "value1"); TempDir tempDir2; TempFile tempFile2(tempDir2.path + "/file2.xml"); - writeSimpleConfig(tempFile2.GetFileName(), "key2", "value2"); + writeSimpleConfig(tempFile2.filename, "key2", "value2"); TempDir tempDir3; TempFile tempFile3(tempDir3.path + "/file3.xml"); - writeSimpleConfig(tempFile3.GetFileName(), "key3", "value3"); + writeSimpleConfig(tempFile3.filename, "key3", "value3"); ConfigurationLoader loader; loader.SetSearchPath(tempDir1.path + ":" + tempDir2.path + ":" + tempDir3.path); @@ -380,7 +377,7 @@ TEST(ConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); + writeSimpleConfig(coreSite.filename, "key1", "value1"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h index 23fc0d31047b3..9ad11b70cc8dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/configuration_test.h @@ -21,19 +21,11 @@ #include "hdfspp/config_parser.h" #include "common/configuration.h" #include "common/configuration_loader.h" -#include "x-platform/syscall.h" - #include #include #include -#include -#include -#include - #include -#include #include -#include namespace hdfs { @@ -115,52 +107,24 @@ void writeDamagedConfig(const std::string& filename, Args... 
args) { // TempDir: is deleted on destruction class TempFile { - public: - TempFile() { - std::vector tmp_buf(filename_.begin(), filename_.end()); - fd_ = XPlatform::Syscall::CreateAndOpenTempFile(tmp_buf); - EXPECT_NE(fd_, -1); - filename_.assign(tmp_buf.data()); - } - - TempFile(std::string fn) : filename_(std::move(fn)) {} - - TempFile(const TempFile& other) = default; - - TempFile(TempFile&& other) noexcept - : filename_{std::move(other.filename_)}, fd_{other.fd_} {} - - TempFile& operator=(const TempFile& other) { - if (&other != this) { - filename_ = other.filename_; - fd_ = other.fd_; - } - return *this; - } - - TempFile& operator=(TempFile&& other) noexcept { - if (&other != this) { - filename_ = std::move(other.filename_); - fd_ = other.fd_; - } - return *this; +public: + std::string filename; + char fn_buffer[128]; + int tempFileHandle; + TempFile() : tempFileHandle(-1) { + strncpy(fn_buffer, "/tmp/test_XXXXXXXXXX", sizeof(fn_buffer)); + tempFileHandle = mkstemp(fn_buffer); + EXPECT_NE(-1, tempFileHandle); + filename = fn_buffer; } - - [[nodiscard]] const std::string& GetFileName() const { return filename_; } - - ~TempFile() { - if (-1 != fd_) { - EXPECT_NE(XPlatform::Syscall::CloseFile(fd_), -1); - } - - unlink(filename_.c_str()); + TempFile(const std::string & fn) : filename(fn), tempFileHandle(-1) { + strncpy(fn_buffer, fn.c_str(), sizeof(fn_buffer)); + fn_buffer[sizeof(fn_buffer)-1] = 0; } - - private: - std::string filename_{"/tmp/test_XXXXXXXXXX"}; - int fd_{-1}; + ~TempFile() { if(-1 != tempFileHandle) close(tempFileHandle); unlink(fn_buffer); } }; + // Callback to remove a directory in the nftw visitor int nftw_remove(const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftwbuf) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc index 147cfee6be82c..01db69d41ef22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_builder_test.cc @@ -45,7 +45,7 @@ TEST(HdfsBuilderTest, TestRead) { TempDir tempDir1; TempFile tempFile1(tempDir1.path + "/core-site.xml"); - writeSimpleConfig(tempFile1.GetFileName(), "key1", "value1"); + writeSimpleConfig(tempFile1.filename, "key1", "value1"); hdfsBuilder * builder = hdfsNewBuilderFromDirectory(tempDir1.path.c_str()); @@ -68,7 +68,7 @@ TEST(HdfsBuilderTest, TestRead) { TempDir tempDir1; TempFile tempFile1(tempDir1.path + "/core-site.xml"); - writeSimpleConfig(tempFile1.GetFileName(), "key1", "100"); + writeSimpleConfig(tempFile1.filename, "key1", "100"); hdfsBuilder * builder = hdfsNewBuilderFromDirectory(tempDir1.path.c_str()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc index 4e1bc3b65aec9..b21725c50fae7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_configuration_test.cc @@ -72,9 +72,9 @@ TEST(HdfsConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); + 
writeSimpleConfig(coreSite.filename, "key1", "value1"); TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeSimpleConfig(hdfsSite.GetFileName(), "key2", "value2"); + writeSimpleConfig(hdfsSite.filename, "key2", "value2"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); @@ -89,7 +89,7 @@ TEST(HdfsConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); + writeSimpleConfig(coreSite.filename, "key1", "value1"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); @@ -103,7 +103,7 @@ TEST(HdfsConfigurationTest, TestDefaultConfigs) { { TempDir tempDir; TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeSimpleConfig(hdfsSite.GetFileName(), "key2", "value2"); + writeSimpleConfig(hdfsSite.filename, "key2", "value2"); ConfigurationLoader loader; loader.SetSearchPath(tempDir.path); @@ -121,9 +121,9 @@ TEST(HdfsConfigurationTest, TestConfigParserAPI) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); + writeSimpleConfig(coreSite.filename, "key1", "value1"); TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeSimpleConfig(hdfsSite.GetFileName(), "key2", "value2"); + writeSimpleConfig(hdfsSite.filename, "key2", "value2"); ConfigParser parser(tempDir.path); @@ -142,9 +142,9 @@ TEST(HdfsConfigurationTest, TestConfigParserAPI) { { TempDir tempDir; TempFile coreSite(tempDir.path + "/core-site.xml"); - writeSimpleConfig(coreSite.GetFileName(), "key1", "value1"); + writeSimpleConfig(coreSite.filename, "key1", "value1"); TempFile hdfsSite(tempDir.path + "/hdfs-site.xml"); - writeDamagedConfig(hdfsSite.GetFileName(), "key2", "value2"); + writeDamagedConfig(hdfsSite.filename, "key2", "value2"); ConfigParser parser(tempDir.path); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc index a7847e1db3547..7fa3971cf7edd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/x-platform/syscall_common_test.cc @@ -85,21 +85,3 @@ TEST(XPlatformSyscall, StringCompareIgnoreCaseNegative) { EXPECT_FALSE(XPlatform::Syscall::StringCompareIgnoreCase("abcd", "abcde")); EXPECT_FALSE(XPlatform::Syscall::StringCompareIgnoreCase("12345", "abcde")); } - -TEST(XPlatformSyscall, CreateAndOpenTempFileBasic) { - std::string pattern("tmp-XXXXXX"); - std::vector pattern_vec(pattern.begin(), pattern.end()); - - const auto fd = XPlatform::Syscall::CreateAndOpenTempFile(pattern_vec); - EXPECT_GT(fd, -1); - EXPECT_TRUE(XPlatform::Syscall::CloseFile(fd)); -} - -TEST(XPlatformSyscall, CreateAndOpenTempFileNegative) { - std::string pattern("does-not-adhere-to-pattern"); - std::vector pattern_vec(pattern.begin(), pattern.end()); - - const auto fd = XPlatform::Syscall::CreateAndOpenTempFile(pattern_vec); - EXPECT_EQ(fd, -1); - EXPECT_FALSE(XPlatform::Syscall::CloseFile(fd)); -} From 2dd63b589b925447669d7fdf339a0ca3226b86e8 Mon Sep 17 00:00:00 2001 From: litao Date: Sat, 17 Apr 2021 15:49:09 +0800 Subject: [PATCH 241/666] HDFS-15975. 
Use LongAdder instead of AtomicLong (#2907) --- .../metrics2/lib/MutableCounterLong.java | 10 +++---- .../hadoop/hdfs/DFSHedgedReadMetrics.java | 14 +++++----- .../hadoop/hdfs/DFSOpsCountStatistics.java | 20 ++++++------- .../fsdataset/impl/FsDatasetCache.java | 28 +++++++++---------- .../fsdataset/impl/FsDatasetImpl.java | 2 +- .../hdfs/server/namenode/FSEditLog.java | 10 +++---- .../server/namenode/FSNamesystemLock.java | 13 +++++---- .../org/apache/hadoop/hdfs/TestPread.java | 8 +++--- 8 files changed, 53 insertions(+), 52 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java index d3dec2e4d06e2..efaf8a14eaf42 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java @@ -23,7 +23,7 @@ import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; /** * A mutable long counter @@ -32,11 +32,11 @@ @InterfaceStability.Evolving public class MutableCounterLong extends MutableCounter { - private AtomicLong value = new AtomicLong(); + private final LongAdder value = new LongAdder(); public MutableCounterLong(MetricsInfo info, long initValue) { super(info); - this.value.set(initValue); + this.value.add(initValue); } @Override @@ -49,12 +49,12 @@ public void incr() { * @param delta of the increment */ public void incr(long delta) { - value.addAndGet(delta); + value.add(delta); setChanged(); } public long value() { - return value.get(); + return value.longValue(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java index 2a228e8d01886..1cd9e82cebb08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java @@ -19,7 +19,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; /** * The client-side metrics for hedged read feature. 
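Every conversion in this patch follows the same recipe: a counter that is bumped from many handler threads but read only when metrics are sampled moves from AtomicLong's single contended CAS cell to LongAdder's striped cells. A minimal sketch of that recipe, using a hypothetical class name rather than any of the classes touched by the patch:

import java.util.concurrent.atomic.LongAdder;

// Illustrative write-heavy, read-rarely counter in the style of the metrics classes in this patch.
public class HotPathCounter {
  private final LongAdder value = new LongAdder();

  public void incr()           { value.increment(); }        // was value.incrementAndGet()
  public void incr(long delta) { value.add(delta); }         // was value.addAndGet(delta)
  public long snapshot()       { return value.longValue(); } // was value.get(); sums the cells, not an atomic read
  public void clear()          { value.reset(); }            // was value.set(0); reliable only with no concurrent adds
}

Under contention LongAdder spreads increments across per-thread cells and only sums them when read, so the hot path avoids the CAS retry loop an AtomicLong would incur; the trade-offs are that reads are a non-atomic sum and there is no set(long), which is why set(0) becomes reset() throughout the patch.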
@@ -28,20 +28,20 @@ */ @InterfaceAudience.Private public class DFSHedgedReadMetrics { - public final AtomicLong hedgedReadOps = new AtomicLong(); - public final AtomicLong hedgedReadOpsWin = new AtomicLong(); - public final AtomicLong hedgedReadOpsInCurThread = new AtomicLong(); + public final LongAdder hedgedReadOps = new LongAdder(); + public final LongAdder hedgedReadOpsWin = new LongAdder(); + public final LongAdder hedgedReadOpsInCurThread = new LongAdder(); public void incHedgedReadOps() { - hedgedReadOps.incrementAndGet(); + hedgedReadOps.increment(); } public void incHedgedReadOpsInCurThread() { - hedgedReadOpsInCurThread.incrementAndGet(); + hedgedReadOpsInCurThread.increment(); } public void incHedgedReadWins() { - hedgedReadOpsWin.incrementAndGet(); + hedgedReadOpsWin.increment(); } public long getHedgedReadOps() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java index fdd0072905fd4..04fef2d5dcd23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java @@ -26,7 +26,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; /** * This storage statistics tracks how many times each DFS operation was issued. @@ -141,21 +141,21 @@ public static OpType fromSymbol(String symbol) { public static final String NAME = "DFSOpsCountStatistics"; - private final Map opsCount = new EnumMap<>(OpType.class); + private final Map opsCount = new EnumMap<>(OpType.class); public DFSOpsCountStatistics() { super(NAME); for (OpType opType : OpType.values()) { - opsCount.put(opType, new AtomicLong(0)); + opsCount.put(opType, new LongAdder()); } } public void incrementOpCounter(OpType op) { - opsCount.get(op).addAndGet(1); + opsCount.get(op).increment(); } private class LongIterator implements Iterator { - private Iterator> iterator = + private final Iterator> iterator = opsCount.entrySet().iterator(); @Override @@ -168,9 +168,9 @@ public LongStatistic next() { if (!iterator.hasNext()) { throw new NoSuchElementException(); } - final Entry entry = iterator.next(); + final Entry entry = iterator.next(); return new LongStatistic(entry.getKey().getSymbol(), - entry.getValue().get()); + entry.getValue().longValue()); } @Override @@ -192,7 +192,7 @@ public Iterator getLongStatistics() { @Override public Long getLong(String key) { final OpType type = OpType.fromSymbol(key); - return type == null ? null : opsCount.get(type).get(); + return type == null ? 
null : opsCount.get(type).longValue(); } @Override @@ -202,8 +202,8 @@ public boolean isTracked(String key) { @Override public void reset() { - for (AtomicLong count : opsCount.values()) { - count.set(0); + for (LongAdder count : opsCount.values()) { + count.reset(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index b6a57fdeffa77..facace28604a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -42,7 +42,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.time.DurationFormatUtils; @@ -120,7 +120,7 @@ public boolean shouldAdvertise() { private final HashMap mappableBlockMap = new HashMap(); - private final AtomicLong numBlocksCached = new AtomicLong(0); + private final LongAdder numBlocksCached = new LongAdder(); private final FsDatasetImpl dataset; @@ -143,11 +143,11 @@ public boolean shouldAdvertise() { /** * Number of cache commands that could not be completed successfully */ - final AtomicLong numBlocksFailedToCache = new AtomicLong(0); + final LongAdder numBlocksFailedToCache = new LongAdder(); /** * Number of uncache commands that could not be completed successfully */ - final AtomicLong numBlocksFailedToUncache = new AtomicLong(0); + final LongAdder numBlocksFailedToUncache = new LongAdder(); public FsDatasetCache(FsDatasetImpl dataset) throws IOException { this.dataset = dataset; @@ -204,7 +204,7 @@ public void initCache(String bpid) throws IOException { for (Map.Entry entry : entrySet) { mappableBlockMap.put(entry.getKey(), new Value(keyToMappableBlock.get(entry.getKey()), State.CACHED)); - numBlocksCached.addAndGet(1); + numBlocksCached.increment(); dataset.datanode.getMetrics().incrBlocksCached(1); } } @@ -278,7 +278,7 @@ synchronized void cacheBlock(long blockId, String bpid, LOG.debug("Block with id {}, pool {} already exists in the " + "FsDatasetCache with state {}", blockId, bpid, prevValue.state ); - numBlocksFailedToCache.incrementAndGet(); + numBlocksFailedToCache.increment(); return; } mappableBlockMap.put(key, new Value(null, State.CACHING)); @@ -301,7 +301,7 @@ synchronized void uncacheBlock(String bpid, long blockId) { LOG.debug("Block with id {}, pool {} does not need to be uncached, " + "because it is not currently in the mappableBlockMap.", blockId, bpid); - numBlocksFailedToUncache.incrementAndGet(); + numBlocksFailedToUncache.increment(); return; } switch (prevValue.state) { @@ -331,7 +331,7 @@ synchronized void uncacheBlock(String bpid, long blockId) { default: LOG.debug("Block with id {}, pool {} does not need to be uncached, " + "because it is in state {}.", blockId, bpid, prevValue.state); - numBlocksFailedToUncache.incrementAndGet(); + numBlocksFailedToUncache.increment(); break; } } @@ -469,7 +469,7 @@ public void run() { dataset.datanode. 
getShortCircuitRegistry().processBlockMlockEvent(key); } - numBlocksCached.addAndGet(1); + numBlocksCached.increment(); dataset.datanode.getMetrics().incrBlocksCached(1); success = true; } finally { @@ -482,7 +482,7 @@ public void run() { LOG.debug("Caching of {} was aborted. We are now caching only {} " + "bytes in total.", key, cacheLoader.getCacheUsed()); IOUtils.closeQuietly(mappableBlock); - numBlocksFailedToCache.incrementAndGet(); + numBlocksFailedToCache.increment(); synchronized (FsDatasetCache.this) { mappableBlockMap.remove(key); @@ -561,7 +561,7 @@ public void run() { } long newUsedBytes = cacheLoader. release(key, value.mappableBlock.getLength()); - numBlocksCached.addAndGet(-1); + numBlocksCached.decrement(); dataset.datanode.getMetrics().incrBlocksUncached(1); if (revocationTimeMs != 0) { LOG.debug("Uncaching of {} completed. usedBytes = {}", @@ -607,15 +607,15 @@ public long getCacheCapacity() { } public long getNumBlocksFailedToCache() { - return numBlocksFailedToCache.get(); + return numBlocksFailedToCache.longValue(); } public long getNumBlocksFailedToUncache() { - return numBlocksFailedToUncache.get(); + return numBlocksFailedToUncache.longValue(); } public long getNumBlocksCached() { - return numBlocksCached.get(); + return numBlocksCached.longValue(); } public synchronized boolean isCached(String bpid, long blockId) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index d06d3cfec6697..d148f774b98b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -2463,7 +2463,7 @@ private void cacheBlock(String bpid, long blockId) { success = true; } finally { if (!success) { - cacheManager.numBlocksFailedToCache.incrementAndGet(); + cacheManager.numBlocksFailedToCache.increment(); } } blockFileName = info.getBlockURI().toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 6b73bbdff436b..79f039bce92c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -28,7 +28,7 @@ import java.util.EnumSet; import java.util.Iterator; import java.util.List; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -183,7 +183,7 @@ private enum State { // these are statistics counters. 
private long numTransactions; // number of transactions - private final AtomicLong numTransactionsBatchedInSync = new AtomicLong(); + private final LongAdder numTransactionsBatchedInSync = new LongAdder(); private long totalTimeTransactions; // total time for all transactions private NameNodeMetrics metrics; @@ -731,7 +731,7 @@ protected void logSync(long mytxid) { if (metrics != null) { // Metrics non-null only when used inside name node metrics.addSync(elapsed); metrics.incrTransactionsBatchedInSync(editsBatchedInSync); - numTransactionsBatchedInSync.addAndGet(editsBatchedInSync); + numTransactionsBatchedInSync.add(editsBatchedInSync); } } finally { @@ -771,7 +771,7 @@ private void printStatistics(boolean force) { .append(" Total time for transactions(ms): ") .append(totalTimeTransactions) .append(" Number of transactions batched in Syncs: ") - .append(numTransactionsBatchedInSync.get()) + .append(numTransactionsBatchedInSync.longValue()) .append(" Number of syncs: ") .append(editLogStream.getNumSync()) .append(" SyncTimes(ms): ") @@ -1404,7 +1404,7 @@ private void startLogSegment(final long segmentTxId, int layoutVersion) numTransactions = 0; totalTimeTransactions = 0; - numTransactionsBatchedInSync.set(0L); + numTransactionsBatchedInSync.reset(); // TODO no need to link this back to storage anymore! // See HDFS-2174. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index 842c6b3f2df59..b4f479fa93c89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -22,6 +22,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Supplier; @@ -113,12 +114,12 @@ public Long initialValue() { * The number of time the read lock * has been held longer than the threshold. */ - private final AtomicLong numReadLockLongHold = new AtomicLong(0); + private final LongAdder numReadLockLongHold = new LongAdder(); /** * The number of time the write lock * has been held for longer than the threshold. */ - private final AtomicLong numWriteLockLongHold = new AtomicLong(0); + private final LongAdder numWriteLockLongHold = new LongAdder(); @VisibleForTesting static final String OP_NAME_OTHER = "OTHER"; @@ -192,7 +193,7 @@ public void readUnlock(String opName, final long readLockIntervalMs = TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos); if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) { - numReadLockLongHold.incrementAndGet(); + numReadLockLongHold.increment(); String lockReportInfo = null; boolean done = false; while (!done) { @@ -309,7 +310,7 @@ private void writeUnlock(String opName, boolean suppressWriteLockReport, LogAction logAction = LogThrottlingHelper.DO_NOT_LOG; if (needReport && writeLockIntervalMs >= this.writeLockReportingThresholdMs) { - numWriteLockLongHold.incrementAndGet(); + numWriteLockLongHold.increment(); if (longestWriteLockHeldInfo.getIntervalMs() <= writeLockIntervalMs) { String lockReportInfo = lockReportInfoSupplier != null ? 
" (" + lockReportInfoSupplier.get() + ")" : ""; @@ -382,7 +383,7 @@ public int getQueueLength() { * has been held longer than the threshold */ public long getNumOfReadLockLongHold() { - return numReadLockLongHold.get(); + return numReadLockLongHold.longValue(); } /** @@ -393,7 +394,7 @@ public long getNumOfReadLockLongHold() { * has been held longer than the threshold. */ public long getNumOfWriteLockLongHold() { - return numWriteLockLongHold.get(); + return numWriteLockLongHold.longValue(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index ac3c122aac89d..c1e0dbb8e630f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -401,9 +401,9 @@ public Void answer(InvocationOnMock invocation) throws Throwable { DFSClient dfsClient = fileSys.getClient(); DFSHedgedReadMetrics metrics = dfsClient.getHedgedReadMetrics(); // Metrics instance is static, so we need to reset counts from prior tests. - metrics.hedgedReadOps.set(0); - metrics.hedgedReadOpsWin.set(0); - metrics.hedgedReadOpsInCurThread.set(0); + metrics.hedgedReadOps.reset(); + metrics.hedgedReadOpsWin.reset(); + metrics.hedgedReadOpsInCurThread.reset(); try { Path file1 = new Path("hedgedReadMaxOut.dat"); @@ -590,7 +590,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { String filename = "/hedgedReadMaxOut.dat"; DFSHedgedReadMetrics metrics = dfsClient.getHedgedReadMetrics(); // Metrics instance is static, so we need to reset counts from prior tests. - metrics.hedgedReadOps.set(0); + metrics.hedgedReadOps.reset(); try { Path file = new Path(filename); output = fileSys.create(file, (short) 2); From 6df9c3a7339dd746c6c76eb880bc5c44adc9adea Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sat, 17 Apr 2021 17:17:44 +0530 Subject: [PATCH 242/666] HADOOP-17642. Remove appender EventCounter to avoid instantiation (#2922) Signed-off-by: Akira Ajisaka --- .../hadoop-common/src/main/conf/log4j.properties | 8 +------- .../src/main/resources/container-log4j.properties | 7 +------ 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 52d2c1ff038e6..5a2ca4d922852 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -20,7 +20,7 @@ hadoop.log.dir=. hadoop.log.file=hadoop.log # Define the root logger to the system property "hadoop.root.logger". -log4j.rootLogger=${hadoop.root.logger}, EventCounter +log4j.rootLogger=${hadoop.root.logger} # Logging Threshold log4j.threshold=ALL @@ -176,12 +176,6 @@ log4j.appender.DNMETRICSRFA.MaxFileSize=64MB log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR #log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. 
-# -log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter - # # shuffle connection log from shuffleHandler diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties index cf499b8b46c0a..678e3a74c897a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties @@ -16,7 +16,7 @@ hadoop.root.logger=DEBUG,CLA yarn.app.mapreduce.shuffle.logger=${hadoop.root.logger} # Define the root logger to the system property "hadoop.root.logger". -log4j.rootLogger=${hadoop.root.logger}, EventCounter +log4j.rootLogger=${hadoop.root.logger} # Logging Threshold log4j.threshold=ALL @@ -69,8 +69,3 @@ log4j.additivity.org.apache.hadoop.mapreduce.task.reduce=false # log4j.logger.org.apache.hadoop.mapred.Merger=${yarn.app.mapreduce.shuffle.logger} log4j.additivity.org.apache.hadoop.mapred.Merger=false -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. -# -log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter From 169c7df1c96e30322e73bc6aa2cdd8fa6eb382b1 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Mon, 19 Apr 2021 16:44:16 +0900 Subject: [PATCH 243/666] HADOOP-17635. Update the ubuntu version in the build instruction. (#2931) Reviewed-by: Akira Ajisaka --- BUILDING.txt | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 9bbb6dbf891a9..4d18f698b9543 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -51,14 +51,13 @@ Known issues: and run your IDE and Docker etc inside that VM. ---------------------------------------------------------------------------------- -Installing required packages for clean install of Ubuntu 14.04 LTS Desktop: +Installing required packages for clean install of Ubuntu 18.04 LTS Desktop. +(For Ubuntu 20.04, gcc/g++ and cmake bundled with Ubuntu can be used. 
+Refer to dev-support/docker/Dockerfile): -* Oracle JDK 1.8 (preferred) - $ sudo apt-get purge openjdk* - $ sudo apt-get install software-properties-common - $ sudo add-apt-repository ppa:webupd8team/java +* Open JDK 1.8 $ sudo apt-get update - $ sudo apt-get install oracle-java8-installer + $ sudo apt-get -y install java-8-openjdk * Maven $ sudo apt-get -y install maven * Native libraries @@ -86,12 +85,13 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop: $ curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 $ tar --bzip2 -xf boost_1_72_0.tar.bz2 && cd boost_1_72_0 $ ./bootstrap.sh --prefix=/usr/ - $ ./b2 --without-python install + $ ./b2 --without-python + $ sudo ./b2 --without-python install Optional packages: * Snappy compression (only used for hadoop-mapreduce-client-nativetask) - $ sudo apt-get install snappy libsnappy-dev + $ sudo apt-get install libsnappy-dev * Intel ISA-L library for erasure coding Please refer to https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version (OR https://github.com/01org/isa-l) From 808b62922d86fe91c0f49cdb300391b0f9d876da Mon Sep 17 00:00:00 2001 From: litao Date: Mon, 19 Apr 2021 19:42:30 +0800 Subject: [PATCH 244/666] HDFS-15970. Print network topology on the web (#2896) --- .../server/namenode/NameNodeHttpServer.java | 6 +- .../namenode/NetworkTopologyServlet.java | 187 ++++++++++++++++ .../src/main/webapps/hdfs/dfshealth.html | 1 + .../src/main/webapps/hdfs/explorer.html | 1 + .../namenode/TestNetworkTopologyServlet.java | 202 ++++++++++++++++++ 5 files changed, 395 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 33913227af2ce..7ca52417d9a0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -166,7 +166,7 @@ void start() throws IOException { httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn); httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); - setupServlets(httpServer, conf); + setupServlets(httpServer); httpServer.start(); int connIdx = 0; @@ -243,7 +243,7 @@ void setAliasMap(InMemoryAliasMap aliasMap) { httpServer.setAttribute(ALIASMAP_ATTRIBUTE_KEY, aliasMap); } - private static void setupServlets(HttpServer2 httpServer, Configuration conf) { + private static void setupServlets(HttpServer2 httpServer) { httpServer.addInternalServlet("startupProgress", StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class); httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, @@ -253,6 +253,8 @@ private static void setupServlets(HttpServer2 httpServer, Configuration conf) { httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME, IsNameNodeActiveServlet.PATH_SPEC, IsNameNodeActiveServlet.class); + httpServer.addInternalServlet("topology", + NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class); } static FSImage 
getFsImageFromContext(ServletContext context) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java new file mode 100644 index 0000000000000..5d089718ccffe --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.net.NodeBase; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.util.StringUtils; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.HttpHeaders; +import java.io.IOException; +import java.io.PrintStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; + +/** + * A servlet to print out the network topology. + */ +@InterfaceAudience.Private +public class NetworkTopologyServlet extends DfsServlet { + + public static final String PATH_SPEC = "/topology"; + + protected static final String FORMAT_JSON = "json"; + protected static final String FORMAT_TEXT = "text"; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws IOException { + final ServletContext context = getServletContext(); + + String format = parseAcceptHeader(request); + if (FORMAT_TEXT.equals(format)) { + response.setContentType("text/plain; charset=UTF-8"); + } else if (FORMAT_JSON.equals(format)) { + response.setContentType("application/json; charset=UTF-8"); + } + + NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); + BlockManager bm = nn.getNamesystem().getBlockManager(); + List leaves = bm.getDatanodeManager().getNetworkTopology() + .getLeaves(NodeBase.ROOT); + + try (PrintStream out = new PrintStream( + response.getOutputStream(), false, "UTF-8")) { + printTopology(out, leaves, format); + } catch (Throwable t) { + String errMsg = "Print network topology failed. 
" + + StringUtils.stringifyException(t); + response.sendError(HttpServletResponse.SC_GONE, errMsg); + throw new IOException(errMsg); + } finally { + response.getOutputStream().close(); + } + } + + /** + * Display each rack and the nodes assigned to that rack, as determined + * by the NameNode, in a hierarchical manner. The nodes and racks are + * sorted alphabetically. + * + * @param stream print stream + * @param leaves leaves nodes under base scope + * @param format the response format + */ + public void printTopology(PrintStream stream, List leaves, + String format) throws BadFormatException, IOException { + if (leaves.isEmpty()) { + stream.print("No DataNodes"); + return; + } + + // Build a map of rack -> nodes + Map> tree = new HashMap<>(); + for(Node dni : leaves) { + String location = dni.getNetworkLocation(); + String name = dni.getName(); + + tree.putIfAbsent(location, new TreeSet<>()); + tree.get(location).add(name); + } + + // Sort the racks (and nodes) alphabetically, display in order + ArrayList racks = new ArrayList<>(tree.keySet()); + Collections.sort(racks); + + if (FORMAT_JSON.equals(format)) { + printJsonFormat(stream, tree, racks); + } else if (FORMAT_TEXT.equals(format)) { + printTextFormat(stream, tree, racks); + } else { + throw new BadFormatException("Bad format: " + format); + } + } + + private void printJsonFormat(PrintStream stream, Map> tree, ArrayList racks) throws IOException { + JsonFactory dumpFactory = new JsonFactory(); + JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream); + dumpGenerator.writeStartArray(); + + for(String r : racks) { + dumpGenerator.writeStartObject(); + dumpGenerator.writeFieldName(r); + TreeSet nodes = tree.get(r); + dumpGenerator.writeStartArray(); + + for(String n : nodes) { + dumpGenerator.writeStartObject(); + dumpGenerator.writeStringField("ip", n); + String hostname = NetUtils.getHostNameOfIP(n); + if(hostname != null) { + dumpGenerator.writeStringField("hostname", hostname); + } + dumpGenerator.writeEndObject(); + } + dumpGenerator.writeEndArray(); + dumpGenerator.writeEndObject(); + } + dumpGenerator.writeEndArray(); + dumpGenerator.flush(); + + if (!dumpGenerator.isClosed()) { + dumpGenerator.close(); + } + } + + private void printTextFormat(PrintStream stream, Map> tree, ArrayList racks) { + for(String r : racks) { + stream.println("Rack: " + r); + TreeSet nodes = tree.get(r); + + for(String n : nodes) { + stream.print(" " + n); + String hostname = NetUtils.getHostNameOfIP(n); + if(hostname != null) { + stream.print(" (" + hostname + ")"); + } + stream.println(); + } + stream.println(); + } + } + + @VisibleForTesting + static String parseAcceptHeader(HttpServletRequest request) { + String format = request.getHeader(HttpHeaders.ACCEPT); + return format != null && format.contains(FORMAT_JSON) ? + FORMAT_JSON : FORMAT_TEXT; + } + + public static class BadFormatException extends Exception { + private static final long serialVersionUID = 1L; + + public BadFormatException(String msg) { + super(msg); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 6e4eade9566d7..8622e4d3a5681 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -52,6 +52,7 @@

     <li><a href="jmx">Metrics</a></li>
     <li><a href="conf">Configuration</a></li>
     <li><a href="stacks">Process Thread Dump</a></li>
+    <li><a href="topology">Network Topology</a></li>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 73bfbd4527f48..3f0509a229700 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -48,6 +48,7 @@
     <li><a href="jmx">Metrics</a></li>
     <li><a href="conf">Configuration</a></li>
     <li><a href="stacks">Process Thread Dump</a></li>
+    <li><a href="topology">Network Topology</a></li>
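The servlet picks its output format from the Accept header: application/json produces an array with one object per rack, mapping the rack path to a list of {"ip", "hostname"} entries (hostname only when reverse DNS resolves), while any other Accept value falls back to the indented plain-text listing. A small client-side sketch of exercising the new endpoint; the NameNode host and port below are placeholders:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class TopologyClient {
  public static void main(String[] args) throws Exception {
    // Placeholder address; substitute the cluster's dfs.namenode.http-address value.
    URL url = new URL("http://namenode.example.com:9870/topology");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json"); // drop this header for the plain-text view
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      // Expected JSON shape, e.g. [{"/rack0":[{"ip":"127.0.0.1:9866","hostname":"localhost"}]},{"/rack1":[...]}]
      reader.lines().forEach(System.out::println);
    } finally {
      conn.disconnect();
    }
  }
}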
  • diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java new file mode 100644 index 0000000000000..7796ed4182ee6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java @@ -0,0 +1,202 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.net.StaticMapping; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestNetworkTopologyServlet { + + @Test + public void testPrintTopologyTextFormat() throws IOException { + StaticMapping.resetMap(); + Configuration conf = new HdfsConfiguration(); + int dataNodesNum = 0; + final ArrayList rackList = new ArrayList(); + for (int i = 0; i < 5; i++) { + for (int j = 0; j < 2; j++) { + rackList.add("/rack" + i); + dataNodesNum++; + } + } + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(dataNodesNum) + .racks(rackList.toArray(new String[rackList.size()])) + .build(); + cluster.waitActive(); + + // get http uri + String httpUri = cluster.getHttpUri(0); + + // send http request + URL url = new URL(httpUri + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert rack info + assertTrue(topology.contains("/rack0")); + assertTrue(topology.contains("/rack1")); + assertTrue(topology.contains("/rack2")); + assertTrue(topology.contains("/rack3")); + assertTrue(topology.contains("/rack4")); + + // assert node number + assertEquals(topology.split("127.0.0.1").length - 1, + dataNodesNum); + } + + @Test + public void testPrintTopologyJsonFormat() throws IOException { + 
StaticMapping.resetMap(); + Configuration conf = new HdfsConfiguration(); + int dataNodesNum = 0; + final ArrayList rackList = new ArrayList(); + for (int i = 0; i < 5; i++) { + for (int j = 0; j < 2; j++) { + rackList.add("/rack" + i); + dataNodesNum++; + } + } + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(dataNodesNum) + .racks(rackList.toArray(new String[rackList.size()])) + .build(); + cluster.waitActive(); + + // get http uri + String httpUri = cluster.getHttpUri(0); + + // send http request + URL url = new URL(httpUri + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.setRequestProperty("Accept", "application/json"); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + String topology = out.toString(); + + // parse json + JsonNode racks = new ObjectMapper().readTree(topology); + + // assert rack number + assertEquals(racks.size(), 5); + + // assert node number + Iterator elements = racks.elements(); + int dataNodesCount = 0; + while(elements.hasNext()){ + JsonNode rack = elements.next(); + Iterator> fields = rack.fields(); + while (fields.hasNext()) { + dataNodesCount += fields.next().getValue().size(); + } + } + assertEquals(dataNodesCount, dataNodesNum); + } + + @Test + public void testPrintTopologyNoDatanodesTextFormat() throws IOException { + StaticMapping.resetMap(); + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + cluster.waitActive(); + + // get http uri + String httpUri = cluster.getHttpUri(0); + + // send http request + URL url = new URL(httpUri + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert node number + assertTrue(topology.contains("No DataNodes")); + } + + @Test + public void testPrintTopologyNoDatanodesJsonFormat() throws IOException { + StaticMapping.resetMap(); + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .build(); + cluster.waitActive(); + + // get http uri + String httpUri = cluster.getHttpUri(0); + + // send http request + URL url = new URL(httpUri + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.setRequestProperty("Accept", "application/json"); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert node number + assertTrue(topology.contains("No DataNodes")); + } +} From a505e469f95226b529bf1c80456deababd4bcd98 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 20 Apr 2021 15:11:01 +0530 Subject: [PATCH 245/666] changes to use autoinitialize and maven depdendecy changes --- 
hadoop-common-project/hadoop-common/pom.xml | 16 +----- .../org/apache/hadoop/tracing/Tracer.java | 55 ++++--------------- hadoop-project/pom.xml | 11 ++++ 3 files changed, 25 insertions(+), 57 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 2fc4832fcb3e4..faf1511e57f23 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -374,27 +374,17 @@ io.opentelemetry opentelemetry-api - 1.0.0 + ${opentelemetry.version} io.opentelemetry opentelemetry-sdk - 1.0.0 + ${opentelemetry.version} io.opentelemetry opentelemetry-sdk-extension-autoconfigure - 1.0.0-alpha - - - io.opentelemetry - opentelemetry-exporter-jaeger - 1.0.0 - - - io.grpc - grpc-netty-shaded - 1.26.0 + ${opentelemetry.version}-alpha diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index d24c9175bf65b..534ecd7ed293d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -17,22 +17,12 @@ */ package org.apache.hadoop.tracing; -import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.api.common.AttributeKey; -import io.opentelemetry.api.common.Attributes; import io.opentelemetry.context.Context; -import io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter; -import io.opentelemetry.sdk.OpenTelemetrySdk; -import io.opentelemetry.sdk.resources.Resource; -import io.opentelemetry.sdk.trace.SdkTracerProvider; -import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.TimeUnit; - /** * No-Op Tracer (for now) to remove HTrace without changing too many files. */ @@ -99,6 +89,8 @@ public String getName() { return name; } + + public static class Builder { static Tracer globalTracer; private String name; @@ -111,44 +103,19 @@ public Builder conf(TraceConfiguration conf) { return this; } - OpenTelemetry initialiseJaegerExporter(String jaegerHost, int jaegerPort, String name) { - ManagedChannel jaegerChannel = - ManagedChannelBuilder.forAddress(jaegerHost, jaegerPort).usePlaintext().build(); - // Export traces to Jaeger - JaegerGrpcSpanExporter jaegerExporter = - JaegerGrpcSpanExporter.builder() - .setChannel(jaegerChannel) - .setTimeout(30, TimeUnit.SECONDS) - .build(); - Resource serviceNameResource = - Resource.create(Attributes.of(AttributeKey.stringKey("service.name"), name)); - // Set to process the spans by the Jaeger Exporter - SdkTracerProvider tracerProvider = - SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(jaegerExporter)) - .setResource(Resource.getDefault().merge(serviceNameResource)) - .build(); - OpenTelemetrySdk openTelemetry = - OpenTelemetrySdk.builder().setTracerProvider(tracerProvider).build(); - - // it's always a good idea to shut down the SDK cleanly at JVM exit. 
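The hand-built Jaeger channel and span processor removed here give way to the SDK's autoconfigure entry point, which derives its configuration from otel.* system properties or their OTEL_* environment-variable equivalents. One way to act on the TODO about not hard-coding those properties is to treat the values as defaults that an operator-supplied -D flag or environment variable overrides; the helper below is only an illustrative sketch under that assumption, not part of the patch:

import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration;
import java.util.Locale;

public final class TracerBootstrap {
  private TracerBootstrap() { }

  // Apply a default only when neither -Dkey nor the matching OTEL_* variable was supplied.
  private static void defaultProperty(String key, String value) {
    String envKey = key.toUpperCase(Locale.ROOT).replace('.', '_'); // otel.traces.exporter -> OTEL_TRACES_EXPORTER
    if (System.getProperty(key) == null && System.getenv(envKey) == null) {
      System.setProperty(key, value);
    }
  }

  public static OpenTelemetry bootstrap(String serviceName) {
    defaultProperty("otel.metrics.exporter", "none");
    defaultProperty("otel.traces.exporter", "jaeger");
    defaultProperty("otel.exporter.jaeger.endpoint", "http://localhost:14250"); // Jaeger collector gRPC port
    defaultProperty("otel.resource.attributes", "service.name=" + serviceName);
    return OpenTelemetrySdkAutoConfiguration.initialize();
  }
}

Exporter, endpoint and resource attributes can then be switched per daemon, for example with OTEL_EXPORTER_JAEGER_ENDPOINT=http://jaeger-collector:14250, without touching the code.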
- Runtime.getRuntime().addShutdownHook(new Thread(tracerProvider::close)); - + static OpenTelemetry initialiseTracer(String name) { + //TODO: Explore the possibility of moving these properties to config or environment variables + System.setProperty("otel.metrics.exporter", "none"); + System.setProperty("otel.traces.exporter", "jaeger"); + System.setProperty("otel.exporter.jaeger.endpoint", "http://localhost:14250"); + System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); + OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); return openTelemetry; } - OpenTelemetry noOpTracer(){ - return OpenTelemetry.noop(); - } - public Tracer build() { if (globalTracer == null) { - //jaeger tracing changes - OpenTelemetry openTelemetry = initialiseJaegerExporter("localhost", 14250, name); - io.opentelemetry.api.trace.Tracer tracer = openTelemetry.getTracer(name); - globalTracer = new Tracer(name, tracer); - - //globalTracer = new Tracer(name, noOpTracer().getTracer(name)); + globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); Tracer.globalTracer = globalTracer; } return globalTracer; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 173749eeeadd1..38b0e9bc123f3 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -212,6 +212,7 @@ 5.3.0 2.4.2 9.8.1 + 1.0.0 @@ -1756,6 +1757,16 @@ lz4-java ${lz4-java.version} + + io.opentelemetry + opentelemetry-exporter-jaeger + 1.0.0 + + + io.grpc + grpc-netty-shaded + 1.26.0 + From d3dcd0719dd586604521aff9c6ef0790a707932b Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 20 Apr 2021 15:32:01 +0100 Subject: [PATCH 246/666] HADOOP-17641. ITestWasbUriAndConfiguration failing. (#2937) This moves the mock account name --which is required to never exist-- from "mockAccount" to an account name containing a static UUID. Contributed by Steve Loughran. --- .../org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java index e420dabb5d0da..5d2d5d4afdc3f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java @@ -65,7 +65,7 @@ public final class AzureBlobStorageTestAccount implements AutoCloseable, public static final String ACCOUNT_KEY_PROPERTY_NAME = "fs.azure.account.key."; public static final String TEST_ACCOUNT_NAME_PROPERTY_NAME = "fs.azure.account.name"; public static final String WASB_TEST_ACCOUNT_NAME_WITH_DOMAIN = "fs.azure.wasb.account.name"; - public static final String MOCK_ACCOUNT_NAME = "mockAccount.blob.core.windows.net"; + public static final String MOCK_ACCOUNT_NAME = "mockAccount-c01112a3-2a23-433e-af2a-e808ea385136.blob.core.windows.net"; public static final String WASB_ACCOUNT_NAME_DOMAIN_SUFFIX = ".blob.core.windows.net"; public static final String WASB_ACCOUNT_NAME_DOMAIN_SUFFIX_REGEX = "\\.blob(\\.preprod)?\\.core\\.windows\\.net"; public static final String MOCK_CONTAINER_NAME = "mockContainer"; From 48abd28fa72a644de9b8bd89c5f5c7a559b6d664 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Tue, 20 Apr 2021 17:34:49 +0000 Subject: [PATCH 247/666] YARN-10723. Change CS nodes page in UI to support custom resource. 
Contributed by Qi Zhu --- .../resourcemanager/webapp/NodesPage.java | 65 +++++++++++++------ .../resourcemanager/webapp/TestNodesPage.java | 26 +++++++- 2 files changed, 69 insertions(+), 22 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index 8e8a2610f0ce7..545ae881803ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import java.util.Collection; +import java.util.Map; import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL; import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE; @@ -90,9 +91,7 @@ protected void render(Block html) { .th(".mem", "Phys Mem Used %") .th(".vcores", "VCores Used") .th(".vcores", "VCores Avail") - .th(".vcores", "Phys VCores Used %") - .th(".gpus", "GPUs Used") - .th(".gpus", "GPUs Avail"); + .th(".vcores", "Phys VCores Used %"); } else { trbody.th(".containers", "Running Containers (G)") .th(".allocationTags", "Allocation Tags") @@ -102,14 +101,26 @@ protected void render(Block html) { .th(".vcores", "VCores Used (G)") .th(".vcores", "VCores Avail (G)") .th(".vcores", "Phys VCores Used %") - .th(".gpus", "GPUs Used (G)") - .th(".gpus", "GPUs Avail (G)") .th(".containers", "Running Containers (O)") .th(".mem", "Mem Used (O)") .th(".vcores", "VCores Used (O)") .th(".containers", "Queued Containers"); } + for (Map.Entry integerEntry : + ResourceUtils.getResourceTypeIndex().entrySet()) { + if (integerEntry.getKey().equals(ResourceInformation.MEMORY_URI) + || integerEntry.getKey().equals(ResourceInformation.VCORES_URI)) { + continue; + } + + trbody.th("." + integerEntry.getKey(), + integerEntry.getKey() + " " + "Used"); + + trbody.th("." + integerEntry.getKey(), + integerEntry.getKey() + " " + "Avail"); + } + TBODY> tbody = trbody.th(".nodeManagerVersion", "Version").__().__().tbody(); @@ -175,17 +186,7 @@ protected void render(Block html) { nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\""); } - Integer gpuIndex = ResourceUtils.getResourceTypeIndex() - .get(ResourceInformation.GPU_URI); - long usedGPUs = 0; - long availableGPUs = 0; - if (gpuIndex != null && info.getUsedResource() != null - && info.getAvailableResource() != null) { - usedGPUs = info.getUsedResource().getResource() - .getResourceValue(ResourceInformation.GPU_URI); - availableGPUs = info.getAvailableResource().getResource() - .getResourceValue(ResourceInformation.GPU_URI); - } + nodeTableData.append("
    ") .append(Times.format(info.getLastHealthUpdate())).append("\",\"") @@ -205,10 +206,6 @@ protected void render(Block html) { .append(String.valueOf(info.getAvailableVirtualCores())) .append("\",\"") .append(String.valueOf((int) info.getVcoreUtilization())) - .append("\",\"") - .append(String.valueOf(usedGPUs)) - .append("\",\"") - .append(String.valueOf(availableGPUs)) .append("\",\""); // If opportunistic containers are enabled, add extra fields. @@ -226,6 +223,34 @@ protected void render(Block html) { .append("\",\""); } + for (Map.Entry integerEntry : + ResourceUtils.getResourceTypeIndex().entrySet()) { + if (integerEntry.getKey().equals(ResourceInformation.MEMORY_URI) + || integerEntry.getKey().equals(ResourceInformation.VCORES_URI)) { + continue; + } + + long usedCustomResource = 0; + long availableCustomResource = 0; + + String resourceName = integerEntry.getKey(); + Integer index = integerEntry.getValue(); + + if (index != null && info.getUsedResource() != null + && info.getAvailableResource() != null) { + usedCustomResource = info.getUsedResource().getResource() + .getResourceValue(resourceName); + availableCustomResource = info.getAvailableResource().getResource() + .getResourceValue(resourceName); + + nodeTableData + .append(usedCustomResource) + .append("\",\"") + .append(availableCustomResource) + .append("\",\""); + } + } + nodeTableData.append(ni.getNodeManagerVersion()) .append("\"],\n"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index 9ab6583b06ce4..dd271fd34d798 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -53,7 +53,7 @@ public class TestNodesPage { // Number of Actual Table Headers for NodesPage.NodesBlock might change in // future. In that case this value should be adjusted to the new value. private final int numberOfThInMetricsTable = 23; - private final int numberOfActualTableHeaders = 18; + private final int numberOfActualTableHeaders = 16; private final int numberOfThForOpportunisticContainers = 4; private Injector injector; @@ -119,12 +119,34 @@ public void testNodesBlockRenderForLostNodesWithGPUResources() initResourceTypes(ResourceInformation.GPU_URI); this.setUpInternal(true); try { - this.testNodesBlockRenderForLostNodes(); + // Test gpu as a custom resource. 
+ // + // yarn.io/gpu Used + // + // + // yarn.io/gpu Avail + // + this.testNodesBlockRenderForLostNodesWithGPU(); } finally { ResourceUtils.initializeResourcesFromResourceInformationMap(oldRtMap); } } + public void testNodesBlockRenderForLostNodesWithGPU() { + NodesBlock nodesBlock = injector.getInstance(NodesBlock.class); + nodesBlock.set("node.state", "lost"); + nodesBlock.render(); + PrintWriter writer = injector.getInstance(PrintWriter.class); + WebAppTests.flushOutput(injector); + + Mockito.verify(writer, + Mockito.times(numberOfActualTableHeaders + + numberOfThInMetricsTable + 2)) + .print(" Date: Wed, 21 Apr 2021 12:35:17 +0530 Subject: [PATCH 248/666] jaeger maven dependency changes for test --- hadoop-common-project/hadoop-common/pom.xml | 12 ++++++++++++ .../main/java/org/apache/hadoop/tracing/Tracer.java | 2 +- hadoop-project/pom.xml | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index faf1511e57f23..1acb39f056c2d 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -386,6 +386,18 @@ opentelemetry-sdk-extension-autoconfigure ${opentelemetry.version}-alpha + + io.opentelemetry + opentelemetry-exporter-jaeger + ${opentelemetry.version} + test + + + io.grpc + grpc-netty-shaded + 1.26.0 + test + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index 534ecd7ed293d..5fe885f0ec798 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -113,7 +113,7 @@ static OpenTelemetry initialiseTracer(String name) { return openTelemetry; } - public Tracer build() { + public synchronized Tracer build() { if (globalTracer == null) { globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); Tracer.globalTracer = globalTracer; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 38b0e9bc123f3..e950ed9c6f208 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1760,7 +1760,7 @@ io.opentelemetry opentelemetry-exporter-jaeger - 1.0.0 + ${opentelemetry.version} io.grpc From 0938e133a8ea8719fdf0b3cdc297563515b3b9ec Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 21 Apr 2021 16:15:49 +0530 Subject: [PATCH 249/666] HDFS-15989. 
Split TestBalancer and De-flake testMaxIterationTime() (#2923) Signed-off-by: Akira Ajisaka Signed-off-by: Takanobu Asanuma --- .../hdfs/server/balancer/TestBalancer.java | 616 --------------- .../TestBalancerLongRunningTasks.java | 746 ++++++++++++++++++ 2 files changed, 746 insertions(+), 616 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index f59743fdaa73e..378d877cbf703 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -19,8 +19,6 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY; import static org.apache.hadoop.fs.StorageType.DEFAULT; -import static org.apache.hadoop.fs.StorageType.RAM_DISK; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; @@ -29,25 +27,18 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_KEYTAB_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.junit.AfterClass; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -58,10 +49,8 @@ import java.io.File; import java.io.IOException; -import java.io.OutputStream; import java.io.PrintWriter; import 
java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; @@ -88,8 +77,6 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; @@ -109,14 +96,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli; import org.apache.hadoop.hdfs.server.balancer.Balancer.Result; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; -import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; @@ -187,7 +169,6 @@ public void shutdown() throws Exception { static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5% static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta static final int DEFAULT_BLOCK_SIZE = 100; - static final int DEFAULT_RAM_DISK_BLOCK_SIZE = 5 * 1024 * 1024; private static final Random r = new Random(); static { @@ -213,20 +194,6 @@ static void initConf(Configuration conf) { conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5*1000); } - static void initConfWithRamDisk(Configuration conf, - long ramDiskCapacity) { - conf.setLong(DFS_BLOCK_SIZE_KEY, DEFAULT_RAM_DISK_BLOCK_SIZE); - conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, ramDiskCapacity); - conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 3); - conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); - conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); - conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1); - conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5*1000); - LazyPersistTestCase.initCacheManipulator(); - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); - } - private final ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); private final int dataBlocks = ecPolicy.getNumDataUnits(); @@ -487,160 +454,6 @@ static void waitForBalancer(long totalUsedSpace, long totalCapacity, waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, 0); } - /** - * Make sure that balancer can't move pinned blocks. - * If specified favoredNodes when create file, blocks will be pinned use - * sticky bit. - * @throws Exception - */ - @Test(timeout=100000) - public void testBalancerWithPinnedBlocks() throws Exception { - // This test assumes stick-bit based block pin mechanism available only - // in Linux/Unix. It can be unblocked on Windows when HDFS-7759 is ready to - // provide a different mechanism for Windows. 
- assumeNotWindows(); - - final Configuration conf = new HdfsConfiguration(); - initConf(conf); - conf.setBoolean(DFS_DATANODE_BLOCK_PINNING_ENABLED, true); - - long[] capacities = new long[] { CAPACITY, CAPACITY }; - String[] hosts = {"host0", "host1"}; - String[] racks = { RACK0, RACK1 }; - int numOfDatanodes = capacities.length; - - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length) - .hosts(hosts).racks(racks).simulatedCapacities(capacities).build(); - - cluster.waitActive(); - client = NameNodeProxies.createProxy(conf, - cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); - - // fill up the cluster to be 80% full - long totalCapacity = sum(capacities); - long totalUsedSpace = totalCapacity * 8 / 10; - InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes]; - for (int i = 0; i < favoredNodes.length; i++) { - // DFSClient will attempt reverse lookup. In case it resolves - // "127.0.0.1" to "localhost", we manually specify the hostname. - int port = cluster.getDataNodes().get(i).getXferAddress().getPort(); - favoredNodes[i] = new InetSocketAddress(hosts[i], port); - } - - DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024, - totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE, - (short) numOfDatanodes, 0, false, favoredNodes); - - // start up an empty node with the same capacity - cluster.startDataNodes(conf, 1, true, null, new String[] { RACK2 }, - new long[] { CAPACITY }); - - totalCapacity += CAPACITY; - - // run balancer and validate results - waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); - - // start rebalancing - Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); - assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); - } - - /** - * Verify balancer won't violate the default block placement policy. - * @throws Exception - */ - @Test(timeout=100000) - public void testRackPolicyAfterBalance() throws Exception { - final Configuration conf = new HdfsConfiguration(); - initConf(conf); - long[] capacities = new long[] { CAPACITY, CAPACITY }; - String[] hosts = {"host0", "host1"}; - String[] racks = { RACK0, RACK1 }; - runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks, - null, CAPACITY, "host2", RACK1, null); - } - - /** - * Verify balancer won't violate upgrade domain block placement policy. 
- * @throws Exception - */ - @Test(timeout=100000) - public void testUpgradeDomainPolicyAfterBalance() throws Exception { - final Configuration conf = new HdfsConfiguration(); - initConf(conf); - conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, - BlockPlacementPolicyWithUpgradeDomain.class, - BlockPlacementPolicy.class); - long[] capacities = new long[] { CAPACITY, CAPACITY, CAPACITY }; - String[] hosts = {"host0", "host1", "host2"}; - String[] racks = { RACK0, RACK1, RACK1 }; - String[] UDs = { "ud0", "ud1", "ud2" }; - runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks, - UDs, CAPACITY, "host3", RACK2, "ud2"); - } - - private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf, - long[] capacities, String[] hosts, String[] racks, String[] UDs, - long newCapacity, String newHost, String newRack, String newUD) - throws Exception { - int numOfDatanodes = capacities.length; - - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length) - .hosts(hosts).racks(racks).simulatedCapacities(capacities).build(); - DatanodeManager dm = cluster.getNamesystem().getBlockManager(). - getDatanodeManager(); - if (UDs != null) { - for(int i = 0; i < UDs.length; i++) { - DatanodeID datanodeId = cluster.getDataNodes().get(i).getDatanodeId(); - dm.getDatanode(datanodeId).setUpgradeDomain(UDs[i]); - } - } - - try { - cluster.waitActive(); - client = NameNodeProxies.createProxy(conf, - cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); - - // fill up the cluster to be 80% full - long totalCapacity = sum(capacities); - long totalUsedSpace = totalCapacity * 8 / 10; - - final long fileSize = totalUsedSpace / numOfDatanodes; - DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024, - fileSize, DEFAULT_BLOCK_SIZE, (short) numOfDatanodes, 0, false); - - // start up an empty node with the same capacity on the same rack as the - // pinned host. - cluster.startDataNodes(conf, 1, true, null, new String[] { newRack }, - new String[] { newHost }, new long[] { newCapacity }); - if (newUD != null) { - DatanodeID newId = cluster.getDataNodes().get( - numOfDatanodes).getDatanodeId(); - dm.getDatanode(newId).setUpgradeDomain(newUD); - } - totalCapacity += newCapacity; - - // run balancer and validate results - waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); - - // start rebalancing - Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); - BlockPlacementPolicy placementPolicy = - cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy(); - List locatedBlocks = client. 
- getBlockLocations(fileName, 0, fileSize).getLocatedBlocks(); - for (LocatedBlock locatedBlock : locatedBlocks) { - BlockPlacementStatus status = placementPolicy.verifyBlockPlacement( - locatedBlock.getLocations(), numOfDatanodes); - assertTrue(status.isPlacementPolicySatisfied()); - } - } finally { - cluster.shutdown(); - } - } - /** * Wait until balanced: each datanode gives utilization within * BALANCE_ALLOWED_VARIANCE of average @@ -1600,144 +1413,6 @@ public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception { CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true); } - - @Test(timeout = 100000) - public void testMaxIterationTime() throws Exception { - final Configuration conf = new HdfsConfiguration(); - initConf(conf); - int blockSize = 10*1024*1024; // 10MB block size - conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); - // limit the worker thread count of Balancer to have only 1 queue per DN - conf.setInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY, 1); - // limit the bandwidth to 4MB per sec to emulate slow block moves - conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, - 4 * 1024 * 1024); - // set client socket timeout to have an IN_PROGRESS notification back from - // the DataNode about the copy in every second. - conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2000L); - // set max iteration time to 2 seconds to timeout before moving any block - conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 2000L); - // setup the cluster - final long capacity = 10L * blockSize; - final long[] dnCapacities = new long[] {capacity, capacity}; - final short rep = 1; - final long seed = 0xFAFAFA; - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(0) - .build(); - try { - cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); - conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); - cluster.startDataNodes(conf, 1, true, null, null, dnCapacities); - cluster.waitClusterUp(); - cluster.waitActive(); - final Path path = new Path("/testMaxIterationTime.dat"); - DistributedFileSystem fs = cluster.getFileSystem(); - // fill the DN to 40% - DFSTestUtil.createFile(fs, path, 4L * blockSize, rep, seed); - // start a new DN - cluster.startDataNodes(conf, 1, true, null, null, dnCapacities); - cluster.triggerHeartbeats(); - // setup Balancer and run one iteration - List connectors = Collections.emptyList(); - try { - BalancerParameters bParams = BalancerParameters.DEFAULT; - // set maxIdleIterations to 1 for NO_MOVE_PROGRESS to be - // reported when there is no block move - connectors = NameNodeConnector.newNameNodeConnectors( - DFSUtil.getInternalNsRpcUris(conf), Balancer.class.getSimpleName(), - Balancer.BALANCER_ID_PATH, conf, 1); - for (NameNodeConnector nnc : connectors) { - LOG.info("NNC to work on: " + nnc); - Balancer b = new Balancer(nnc, bParams, conf); - Result r = b.runOneIteration(); - // Since no block cannot be moved in 2 seconds (i.e., - // 4MB/s * 2s = 8MB < 10MB), NO_MOVE_PROGRESS will be reported. - // When a block move is not canceled in 2 seconds properly and then - // a block is moved unexpectedly, IN_PROGRESS will be reported. 
- assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.", - ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus()); - assertEquals(0, r.getBlocksMoved()); - } - } finally { - for (NameNodeConnector nnc : connectors) { - IOUtils.cleanupWithLogger(null, nnc); - } - } - } finally { - cluster.shutdown(true, true); - } - } - - /* - * Test Balancer with Ram_Disk configured - * One DN has two files on RAM_DISK, other DN has no files on RAM_DISK. - * Then verify that the balancer does not migrate files on RAM_DISK across DN. - */ - @Test(timeout=300000) - public void testBalancerWithRamDisk() throws Exception { - final int SEED = 0xFADED; - final short REPL_FACT = 1; - Configuration conf = new Configuration(); - - final int defaultRamDiskCapacity = 10; - final long ramDiskStorageLimit = - ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) + - (DEFAULT_RAM_DISK_BLOCK_SIZE - 1); - final long diskStorageLimit = - ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) + - (DEFAULT_RAM_DISK_BLOCK_SIZE - 1); - - initConfWithRamDisk(conf, ramDiskStorageLimit); - - cluster = new MiniDFSCluster - .Builder(conf) - .numDataNodes(1) - .storageCapacities(new long[] { ramDiskStorageLimit, diskStorageLimit }) - .storageTypes(new StorageType[] { RAM_DISK, DEFAULT }) - .build(); - - cluster.waitActive(); - // Create few files on RAM_DISK - final String METHOD_NAME = GenericTestUtils.getMethodName(); - final Path path1 = new Path("/" + METHOD_NAME + ".01.dat"); - final Path path2 = new Path("/" + METHOD_NAME + ".02.dat"); - - DistributedFileSystem fs = cluster.getFileSystem(); - DFSClient client = fs.getClient(); - DFSTestUtil.createFile(fs, path1, true, - DEFAULT_RAM_DISK_BLOCK_SIZE, 4 * DEFAULT_RAM_DISK_BLOCK_SIZE, - DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true); - DFSTestUtil.createFile(fs, path2, true, - DEFAULT_RAM_DISK_BLOCK_SIZE, 1 * DEFAULT_RAM_DISK_BLOCK_SIZE, - DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true); - - // Sleep for a short time to allow the lazy writer thread to do its job - Thread.sleep(6 * 1000); - - // Add another fresh DN with the same type/capacity without files on RAM_DISK - StorageType[][] storageTypes = new StorageType[][] {{RAM_DISK, DEFAULT}}; - long[][] storageCapacities = new long[][]{{ramDiskStorageLimit, - diskStorageLimit}}; - cluster.startDataNodes(conf, REPL_FACT, storageTypes, true, null, - null, null, storageCapacities, null, false, false, false, null); - - cluster.triggerHeartbeats(); - Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - - // Run Balancer - final BalancerParameters p = BalancerParameters.DEFAULT; - final int r = Balancer.run(namenodes, p, conf); - - // Validate no RAM_DISK block should be moved - assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); - - // Verify files are still on RAM_DISK - DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path1, RAM_DISK); - DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path2, RAM_DISK); - } - /** * Check that the balancer exits when there is an unfinalized upgrade. */ @@ -1800,66 +1475,6 @@ public void testBalancerDuringUpgrade() throws Exception { Balancer.run(namenodes, p, conf)); } - /** - * Test special case. Two replicas belong to same block should not in same node. - * We have 2 nodes. - * We have a block in (DN0,SSD) and (DN1,DISK). - * Replica in (DN0,SSD) should not be moved to (DN1,SSD). - * Otherwise DN1 has 2 replicas. 
- */ - @Test(timeout=100000) - public void testTwoReplicaShouldNotInSameDN() throws Exception { - final Configuration conf = new HdfsConfiguration(); - - int blockSize = 5 * 1024 * 1024 ; - conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, - 1L); - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); - - int numOfDatanodes =2; - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(2) - .racks(new String[]{"/default/rack0", "/default/rack0"}) - .storagesPerDatanode(2) - .storageTypes(new StorageType[][]{ - {StorageType.SSD, StorageType.DISK}, - {StorageType.SSD, StorageType.DISK}}) - .storageCapacities(new long[][]{ - {100 * blockSize, 20 * blockSize}, - {20 * blockSize, 100 * blockSize}}) - .build(); - cluster.waitActive(); - - //set "/bar" directory with ONE_SSD storage policy. - DistributedFileSystem fs = cluster.getFileSystem(); - Path barDir = new Path("/bar"); - fs.mkdir(barDir,new FsPermission((short)777)); - fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME); - - // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full, - // and (DN0,SSD) and (DN1,DISK) are about 15% full. - long fileLen = 30 * blockSize; - // fooFile has ONE_SSD policy. So - // (DN0,SSD) and (DN1,DISK) have 2 replicas belong to same block. - // (DN0,DISK) and (DN1,SSD) have 2 replicas belong to same block. - Path fooFile = new Path(barDir, "foo"); - createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0); - // update space info - cluster.triggerHeartbeats(); - - BalancerParameters p = BalancerParameters.DEFAULT; - Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - final int r = Balancer.run(namenodes, p, conf); - - // Replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK) - // already has one. Otherwise DN1 will have 2 replicas. - // For same reason, no replicas were moved. - assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); - } - /** * Test running many balancer simultaneously. * @@ -1931,121 +1546,6 @@ public void testManyBalancerSimultaneously() throws Exception { ExitStatus.SUCCESS.getExitCode(), exitCode); } - /** Balancer should not move blocks with size < minBlockSize. 
*/ - @Test(timeout=60000) - public void testMinBlockSizeAndSourceNodes() throws Exception { - final Configuration conf = new HdfsConfiguration(); - initConf(conf); - - final short replication = 3; - final long[] lengths = {10, 10, 10, 10}; - final long[] capacities = new long[replication]; - final long totalUsed = capacities.length * sum(lengths); - Arrays.fill(capacities, 1000); - - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(capacities.length) - .simulatedCapacities(capacities) - .build(); - final DistributedFileSystem dfs = cluster.getFileSystem(); - cluster.waitActive(); - client = NameNodeProxies.createProxy(conf, dfs.getUri(), - ClientProtocol.class).getProxy(); - - // fill up the cluster to be 80% full - for(int i = 0; i < lengths.length; i++) { - final long size = lengths[i]; - final Path p = new Path("/file" + i + "_size" + size); - try(OutputStream out = dfs.create(p)) { - for(int j = 0; j < size; j++) { - out.write(j); - } - } - } - - // start up an empty node with the same capacity - cluster.startDataNodes(conf, capacities.length, true, null, null, capacities); - LOG.info("capacities = " + Arrays.toString(capacities)); - LOG.info("totalUsedSpace= " + totalUsed); - LOG.info("lengths = " + Arrays.toString(lengths) + ", #=" + lengths.length); - waitForHeartBeat(totalUsed, 2*capacities[0]*capacities.length, client, cluster); - - final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - - { // run Balancer with min-block-size=50 - final BalancerParameters p = Balancer.Cli.parse(new String[] { - "-policy", BalancingPolicy.Node.INSTANCE.getName(), - "-threshold", "1" - }); - assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); - assertEquals(p.getThreshold(), 1.0, 0.001); - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50); - final int r = Balancer.run(namenodes, p, conf); - assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); - } - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1); - - { // run Balancer with empty nodes as source nodes - final Set sourceNodes = new HashSet<>(); - final List datanodes = cluster.getDataNodes(); - for(int i = capacities.length; i < datanodes.size(); i++) { - sourceNodes.add(datanodes.get(i).getDisplayName()); - } - final BalancerParameters p = Balancer.Cli.parse(new String[] { - "-policy", BalancingPolicy.Node.INSTANCE.getName(), - "-threshold", "1", - "-source", StringUtils.join(sourceNodes, ',') - }); - assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); - assertEquals(p.getThreshold(), 1.0, 0.001); - assertEquals(p.getSourceNodes(), sourceNodes); - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50); - final int r = Balancer.run(namenodes, p, conf); - assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r); - } - - { // run Balancer with a filled node as a source node - final Set sourceNodes = new HashSet<>(); - final List datanodes = cluster.getDataNodes(); - sourceNodes.add(datanodes.get(0).getDisplayName()); - final BalancerParameters p = Balancer.Cli.parse(new String[] { - "-policy", BalancingPolicy.Node.INSTANCE.getName(), - "-threshold", "1", - "-source", StringUtils.join(sourceNodes, ',') - }); - assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); - assertEquals(p.getThreshold(), 1.0, 0.001); - assertEquals(p.getSourceNodes(), sourceNodes); - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1); - final int r = Balancer.run(namenodes, p, conf); - 
assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r); - } - - { // run Balancer with all filled node as source nodes - final Set sourceNodes = new HashSet<>(); - final List datanodes = cluster.getDataNodes(); - for(int i = 0; i < capacities.length; i++) { - sourceNodes.add(datanodes.get(i).getDisplayName()); - } - final BalancerParameters p = Balancer.Cli.parse(new String[] { - "-policy", BalancingPolicy.Node.INSTANCE.getName(), - "-threshold", "1", - "-source", StringUtils.join(sourceNodes, ',') - }); - assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); - assertEquals(p.getThreshold(), 1.0, 0.001); - assertEquals(p.getSourceNodes(), sourceNodes); - - conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1); - final int r = Balancer.run(namenodes, p, conf); - assertEquals(ExitStatus.SUCCESS.getExitCode(), r); - } - } - public void integrationTestWithStripedFile(Configuration conf) throws Exception { initConfWithStripe(conf); doTestBalancerWithStripedFile(conf); @@ -2203,122 +1703,6 @@ void testBalancerRPCDelay(int getBlocksMaxQps) throws Exception { getBlocksMaxQps, getBlockCallsPerSecond <= getBlocksMaxQps); } - @Test(timeout = 60000) - public void testBalancerWithSortTopNodes() throws Exception { - final Configuration conf = new HdfsConfiguration(); - initConf(conf); - conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 30000); - - final long capacity = 1000L; - final int diffBetweenNodes = 50; - - // Set up the datanodes with two groups: - // 5 over-utilized nodes with 80%, 85%, 90%, 95%, 100% usage - // 2 under-utilizaed nodes with 0%, 5% usage - // With sortTopNodes option, 100% and 95% used ones will be chosen. - final int numOfOverUtilizedDn = 5; - final int numOfUnderUtilizedDn = 2; - final int totalNumOfDn = numOfOverUtilizedDn + numOfUnderUtilizedDn; - final long[] capacityArray = new long[totalNumOfDn]; - Arrays.fill(capacityArray, capacity); - - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(totalNumOfDn) - .simulatedCapacities(capacityArray) - .build(); - - cluster.setDataNodesDead(); - - List dataNodes = cluster.getDataNodes(); - - // Create top used nodes - for (int i = 0; i < numOfOverUtilizedDn; i++) { - // Bring one node alive - DataNodeTestUtils.triggerHeartbeat(dataNodes.get(i)); - DataNodeTestUtils.triggerBlockReport(dataNodes.get(i)); - // Create nodes with: 80%, 85%, 90%, 95%, 100%. 
- int capacityForThisDatanode = (int)capacity - - diffBetweenNodes * (numOfOverUtilizedDn - i - 1); - createFile(cluster, new Path("test_big" + i), - capacityForThisDatanode, (short) 1, 0); - cluster.setDataNodesDead(); - } - - // Create under utilized nodes - for (int i = numOfUnderUtilizedDn - 1; i >= 0; i--) { - int index = i + numOfOverUtilizedDn; - // Bring one node alive - DataNodeTestUtils.triggerHeartbeat(dataNodes.get(index)); - DataNodeTestUtils.triggerBlockReport(dataNodes.get(index)); - // Create nodes with: 5%, 0% - int capacityForThisDatanode = diffBetweenNodes * i; - createFile(cluster, - new Path("test_small" + i), - capacityForThisDatanode, (short) 1, 0); - cluster.setDataNodesDead(); - } - - // Bring all nodes alive - cluster.triggerHeartbeats(); - cluster.triggerBlockReports(); - cluster.waitFirstBRCompleted(0, 6000); - - final BalancerParameters p = Balancer.Cli.parse(new String[] { - "-policy", BalancingPolicy.Node.INSTANCE.getName(), - "-threshold", "1", - "-sortTopNodes" - }); - - client = NameNodeProxies.createProxy(conf, - cluster.getFileSystem(0).getUri(), - ClientProtocol.class).getProxy(); - - // Set max-size-to-move to small number - // so only top two nodes will be chosen in one iteration. - conf.setLong(DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY, 99L); - - final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - - List connectors = NameNodeConnector - .newNameNodeConnectors(namenodes, - Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf, - BalancerParameters.DEFAULT.getMaxIdleIteration()); - final Balancer b = new Balancer(connectors.get(0), p, conf); - Result balancerResult = b.runOneIteration(); - - cluster.triggerDeletionReports(); - cluster.triggerBlockReports(); - cluster.triggerHeartbeats(); - - DatanodeInfo[] datanodeReport = client - .getDatanodeReport(DatanodeReportType.ALL); - - long maxUsage = 0; - for (int i = 0; i < totalNumOfDn; i++) { - maxUsage = Math.max(maxUsage, datanodeReport[i].getDfsUsed()); - } - - // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file. - // The HDFS balancer will choose a block to move from this node randomly. More likely it will - // be 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY which is 99L, - // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B. - // However, chances are the first block selected to be moved from this 95% DN is the 50B block. - // After this block is moved, the total moved size so far would be 50B which is smaller than - // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block. - // The second block will always be of size 100B. So total bytes moved from this 95% DN will be - // 2 blocks of size (100B + 50B) 150B. - // Hence, overall total blocks moved by HDFS balancer would be either of these 2 options: - // a) 2 blocks of total size (100B + 100B) - // b) 3 blocks of total size (50B + 100B + 100B) - assertTrue("BalancerResult is not as expected. 
" + balancerResult, - (balancerResult.getBytesAlreadyMoved() == 200 - && balancerResult.getBlocksMoved() == 2) - || (balancerResult.getBytesAlreadyMoved() == 250 - && balancerResult.getBlocksMoved() == 3)); - // 100% and 95% used nodes will be balanced, so top used will be 900 - assertEquals(900, maxUsage); - } - /** * @param args */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java new file mode 100644 index 0000000000000..0d6300cf05e91 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java @@ -0,0 +1,746 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.balancer; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.NameNodeProxies; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; + +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.Arrays; +import 
java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.fs.StorageType.DEFAULT; +import static org.apache.hadoop.fs.StorageType.RAM_DISK; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC; +import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Some long running Balancer tasks. + */ +public class TestBalancerLongRunningTasks { + + private static final Logger LOG = + LoggerFactory.getLogger(TestBalancerLongRunningTasks.class); + + static { + GenericTestUtils.setLogLevel(Balancer.LOG, Level.TRACE); + GenericTestUtils.setLogLevel(Dispatcher.LOG, Level.DEBUG); + } + + private final static long CAPACITY = 5000L; + private final static String RACK0 = "/rack0"; + private final static String RACK1 = "/rack1"; + private final static String RACK2 = "/rack2"; + private final static String FILE_NAME = "/tmp.txt"; + private final static Path FILE_PATH = new Path(FILE_NAME); + private MiniDFSCluster cluster; + + @After + public void shutdown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + private ClientProtocol client; + + static final int DEFAULT_BLOCK_SIZE = 100; + static final int DEFAULT_RAM_DISK_BLOCK_SIZE = 5 * 1024 * 1024; + + static { + initTestSetup(); + } + + public static void initTestSetup() { + // do not create id file since it occupies the disk space + NameNodeConnector.setWrite2IdFile(false); + } + + static void initConf(Configuration conf) { + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); + conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE); + conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); + SimulatedFSDataset.setFactory(conf); + + conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L); + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); + conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5 * 1000); + } + + static void initConfWithRamDisk(Configuration conf, + long ramDiskCapacity) { + conf.setLong(DFS_BLOCK_SIZE_KEY, DEFAULT_RAM_DISK_BLOCK_SIZE); + conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, ramDiskCapacity); + conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 3); + conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); + conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); + conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1); + conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5 * 1000); + LazyPersistTestCase.initCacheManipulator(); + + 
conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); + } + + /** + * Test special case. Two replicas belong to same block should not in same + * node. + * We have 2 nodes. + * We have a block in (DN0,SSD) and (DN1,DISK). + * Replica in (DN0,SSD) should not be moved to (DN1,SSD). + * Otherwise DN1 has 2 replicas. + */ + @Test(timeout = 100000) + public void testTwoReplicaShouldNotInSameDN() throws Exception { + final Configuration conf = new HdfsConfiguration(); + + int blockSize = 5 * 1024 * 1024; + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); + + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); + + int numOfDatanodes = 2; + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(2) + .racks(new String[]{"/default/rack0", "/default/rack0"}) + .storagesPerDatanode(2) + .storageTypes(new StorageType[][]{ + {StorageType.SSD, StorageType.DISK}, + {StorageType.SSD, StorageType.DISK}}) + .storageCapacities(new long[][]{ + {100 * blockSize, 20 * blockSize}, + {20 * blockSize, 100 * blockSize}}) + .build(); + cluster.waitActive(); + + //set "/bar" directory with ONE_SSD storage policy. + DistributedFileSystem fs = cluster.getFileSystem(); + Path barDir = new Path("/bar"); + fs.mkdir(barDir, new FsPermission((short) 777)); + fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME); + + // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full, + // and (DN0,SSD) and (DN1,DISK) are about 15% full. + long fileLen = 30 * blockSize; + // fooFile has ONE_SSD policy. So + // (DN0,SSD) and (DN1,DISK) have 2 replicas belong to same block. + // (DN0,DISK) and (DN1,SSD) have 2 replicas belong to same block. + Path fooFile = new Path(barDir, "foo"); + TestBalancer.createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, + 0); + // update space info + cluster.triggerHeartbeats(); + + BalancerParameters p = BalancerParameters.DEFAULT; + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + final int r = Balancer.run(namenodes, p, conf); + + // Replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK) + // already has one. Otherwise DN1 will have 2 replicas. + // For same reason, no replicas were moved. + assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); + } + + /* + * Test Balancer with Ram_Disk configured + * One DN has two files on RAM_DISK, other DN has no files on RAM_DISK. + * Then verify that the balancer does not migrate files on RAM_DISK across DN. 
+ */ + @Test(timeout = 300000) + public void testBalancerWithRamDisk() throws Exception { + final int seed = 0xFADED; + final short replicationFactor = 1; + Configuration conf = new Configuration(); + + final int defaultRamDiskCapacity = 10; + final long ramDiskStorageLimit = + ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) + + (DEFAULT_RAM_DISK_BLOCK_SIZE - 1); + final long diskStorageLimit = + ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) + + (DEFAULT_RAM_DISK_BLOCK_SIZE - 1); + + initConfWithRamDisk(conf, ramDiskStorageLimit); + + cluster = new MiniDFSCluster + .Builder(conf) + .numDataNodes(1) + .storageCapacities(new long[]{ramDiskStorageLimit, diskStorageLimit}) + .storageTypes(new StorageType[]{RAM_DISK, DEFAULT}) + .build(); + + cluster.waitActive(); + // Create few files on RAM_DISK + final String methodName = GenericTestUtils.getMethodName(); + final Path path1 = new Path("/" + methodName + ".01.dat"); + final Path path2 = new Path("/" + methodName + ".02.dat"); + + DistributedFileSystem fs = cluster.getFileSystem(); + DFSClient dfsClient = fs.getClient(); + DFSTestUtil.createFile(fs, path1, true, + DEFAULT_RAM_DISK_BLOCK_SIZE, 4 * DEFAULT_RAM_DISK_BLOCK_SIZE, + DEFAULT_RAM_DISK_BLOCK_SIZE, replicationFactor, seed, true); + DFSTestUtil.createFile(fs, path2, true, + DEFAULT_RAM_DISK_BLOCK_SIZE, 1 * DEFAULT_RAM_DISK_BLOCK_SIZE, + DEFAULT_RAM_DISK_BLOCK_SIZE, replicationFactor, seed, true); + + // Sleep for a short time to allow the lazy writer thread to do its job + Thread.sleep(6 * 1000); + + // Add another fresh DN with the same type/capacity without files on + // RAM_DISK + StorageType[][] storageTypes = new StorageType[][]{{RAM_DISK, DEFAULT}}; + long[][] storageCapacities = new long[][]{{ramDiskStorageLimit, + diskStorageLimit}}; + cluster.startDataNodes(conf, replicationFactor, storageTypes, true, null, + null, null, storageCapacities, null, false, false, false, null); + + cluster.triggerHeartbeats(); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + + // Run Balancer + final BalancerParameters p = BalancerParameters.DEFAULT; + final int r = Balancer.run(namenodes, p, conf); + + // Validate no RAM_DISK block should be moved + assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); + + // Verify files are still on RAM_DISK + DFSTestUtil.verifyFileReplicasOnStorageType(fs, dfsClient, path1, RAM_DISK); + DFSTestUtil.verifyFileReplicasOnStorageType(fs, dfsClient, path2, RAM_DISK); + } + + /** + * Balancer should not move blocks with size < minBlockSize. 
+ */ + @Test(timeout = 60000) + public void testMinBlockSizeAndSourceNodes() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + + final short replication = 3; + final long[] lengths = {10, 10, 10, 10}; + final long[] capacities = new long[replication]; + final long totalUsed = capacities.length * TestBalancer.sum(lengths); + Arrays.fill(capacities, 1000); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(capacities.length) + .simulatedCapacities(capacities) + .build(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + cluster.waitActive(); + client = NameNodeProxies.createProxy(conf, dfs.getUri(), + ClientProtocol.class).getProxy(); + + // fill up the cluster to be 80% full + for (int i = 0; i < lengths.length; i++) { + final long size = lengths[i]; + final Path p = new Path("/file" + i + "_size" + size); + try (OutputStream out = dfs.create(p)) { + for (int j = 0; j < size; j++) { + out.write(j); + } + } + } + + // start up an empty node with the same capacity + cluster.startDataNodes(conf, capacities.length, true, null, null, capacities); + LOG.info("capacities = " + Arrays.toString(capacities)); + LOG.info("totalUsedSpace= " + totalUsed); + LOG.info("lengths = " + Arrays.toString(lengths) + ", #=" + lengths.length); + TestBalancer.waitForHeartBeat(totalUsed, + 2 * capacities[0] * capacities.length, client, cluster); + + final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + + { // run Balancer with min-block-size=50 + final BalancerParameters p = Balancer.Cli.parse(new String[]{ + "-policy", BalancingPolicy.Node.INSTANCE.getName(), + "-threshold", "1" + }); + assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); + assertEquals(p.getThreshold(), 1.0, 0.001); + + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50); + final int r = Balancer.run(namenodes, p, conf); + assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); + } + + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1); + + { // run Balancer with empty nodes as source nodes + final Set sourceNodes = new HashSet<>(); + final List datanodes = cluster.getDataNodes(); + for (int i = capacities.length; i < datanodes.size(); i++) { + sourceNodes.add(datanodes.get(i).getDisplayName()); + } + final BalancerParameters p = Balancer.Cli.parse(new String[]{ + "-policy", BalancingPolicy.Node.INSTANCE.getName(), + "-threshold", "1", + "-source", StringUtils.join(sourceNodes, ',') + }); + assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); + assertEquals(p.getThreshold(), 1.0, 0.001); + assertEquals(p.getSourceNodes(), sourceNodes); + + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50); + final int r = Balancer.run(namenodes, p, conf); + assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r); + } + + { // run Balancer with a filled node as a source node + final Set sourceNodes = new HashSet<>(); + final List datanodes = cluster.getDataNodes(); + sourceNodes.add(datanodes.get(0).getDisplayName()); + final BalancerParameters p = Balancer.Cli.parse(new String[]{ + "-policy", BalancingPolicy.Node.INSTANCE.getName(), + "-threshold", "1", + "-source", StringUtils.join(sourceNodes, ',') + }); + assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); + assertEquals(p.getThreshold(), 1.0, 0.001); + assertEquals(p.getSourceNodes(), sourceNodes); + + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1); + final int r = 
Balancer.run(namenodes, p, conf); + assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r); + } + + { // run Balancer with all filled node as source nodes + final Set sourceNodes = new HashSet<>(); + final List datanodes = cluster.getDataNodes(); + for (int i = 0; i < capacities.length; i++) { + sourceNodes.add(datanodes.get(i).getDisplayName()); + } + final BalancerParameters p = Balancer.Cli.parse(new String[]{ + "-policy", BalancingPolicy.Node.INSTANCE.getName(), + "-threshold", "1", + "-source", StringUtils.join(sourceNodes, ',') + }); + assertEquals(p.getBalancingPolicy(), BalancingPolicy.Node.INSTANCE); + assertEquals(p.getThreshold(), 1.0, 0.001); + assertEquals(p.getSourceNodes(), sourceNodes); + + conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1); + final int r = Balancer.run(namenodes, p, conf); + assertEquals(ExitStatus.SUCCESS.getExitCode(), r); + } + } + + /** + * Verify balancer won't violate upgrade domain block placement policy. + * + * @throws Exception + */ + @Test(timeout = 100000) + public void testUpgradeDomainPolicyAfterBalance() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, + BlockPlacementPolicyWithUpgradeDomain.class, + BlockPlacementPolicy.class); + long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY}; + String[] hosts = {"host0", "host1", "host2"}; + String[] racks = {RACK0, RACK1, RACK1}; + String[] uds = {"ud0", "ud1", "ud2"}; + runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks, + uds, CAPACITY, "host3", RACK2, "ud2"); + } + + /** + * Verify balancer won't violate the default block placement policy. + * + * @throws Exception + */ + @Test(timeout = 100000) + public void testRackPolicyAfterBalance() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + long[] capacities = new long[]{CAPACITY, CAPACITY}; + String[] hosts = {"host0", "host1"}; + String[] racks = {RACK0, RACK1}; + runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks, + null, CAPACITY, "host2", RACK1, null); + } + + private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf, + long[] capacities, String[] hosts, String[] racks, String[] UDs, + long newCapacity, String newHost, String newRack, String newUD) + throws Exception { + int numOfDatanodes = capacities.length; + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length) + .hosts(hosts).racks(racks).simulatedCapacities(capacities).build(); + DatanodeManager dm = cluster.getNamesystem().getBlockManager(). + getDatanodeManager(); + if (UDs != null) { + for (int i = 0; i < UDs.length; i++) { + DatanodeID datanodeId = cluster.getDataNodes().get(i).getDatanodeId(); + dm.getDatanode(datanodeId).setUpgradeDomain(UDs[i]); + } + } + + try { + cluster.waitActive(); + client = NameNodeProxies.createProxy(conf, + cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); + + // fill up the cluster to be 80% full + long totalCapacity = TestBalancer.sum(capacities); + long totalUsedSpace = totalCapacity * 8 / 10; + + final long fileSize = totalUsedSpace / numOfDatanodes; + DFSTestUtil.createFile(cluster.getFileSystem(0), FILE_PATH, false, 1024, + fileSize, DEFAULT_BLOCK_SIZE, (short) numOfDatanodes, 0, false); + + // start up an empty node with the same capacity on the same rack as the + // pinned host. 
+ cluster.startDataNodes(conf, 1, true, null, new String[]{newRack}, + new String[]{newHost}, new long[]{newCapacity}); + if (newUD != null) { + DatanodeID newId = cluster.getDataNodes().get( + numOfDatanodes).getDatanodeId(); + dm.getDatanode(newId).setUpgradeDomain(newUD); + } + totalCapacity += newCapacity; + + // run balancer and validate results + TestBalancer.waitForHeartBeat(totalUsedSpace, + totalCapacity, client, cluster); + + // start rebalancing + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); + BlockPlacementPolicy placementPolicy = + cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy(); + List locatedBlocks = client. + getBlockLocations(FILE_NAME, 0, fileSize).getLocatedBlocks(); + for (LocatedBlock locatedBlock : locatedBlocks) { + BlockPlacementStatus status = placementPolicy.verifyBlockPlacement( + locatedBlock.getLocations(), numOfDatanodes); + assertTrue(status.isPlacementPolicySatisfied()); + } + } finally { + cluster.shutdown(); + } + } + + /** + * Make sure that balancer can't move pinned blocks. + * If specified favoredNodes when create file, blocks will be pinned use + * sticky bit. + * + * @throws Exception + */ + @Test(timeout = 100000) + public void testBalancerWithPinnedBlocks() throws Exception { + // This test assumes stick-bit based block pin mechanism available only + // in Linux/Unix. It can be unblocked on Windows when HDFS-7759 is ready to + // provide a different mechanism for Windows. + assumeNotWindows(); + + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + conf.setBoolean(DFS_DATANODE_BLOCK_PINNING_ENABLED, true); + + long[] capacities = new long[]{CAPACITY, CAPACITY}; + String[] hosts = {"host0", "host1"}; + String[] racks = {RACK0, RACK1}; + int numOfDatanodes = capacities.length; + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length) + .hosts(hosts).racks(racks).simulatedCapacities(capacities).build(); + + cluster.waitActive(); + client = NameNodeProxies.createProxy(conf, + cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); + + // fill up the cluster to be 80% full + long totalCapacity = TestBalancer.sum(capacities); + long totalUsedSpace = totalCapacity * 8 / 10; + InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes]; + for (int i = 0; i < favoredNodes.length; i++) { + // DFSClient will attempt reverse lookup. In case it resolves + // "127.0.0.1" to "localhost", we manually specify the hostname. 
+ int port = cluster.getDataNodes().get(i).getXferAddress().getPort(); + favoredNodes[i] = new InetSocketAddress(hosts[i], port); + } + + DFSTestUtil.createFile(cluster.getFileSystem(0), FILE_PATH, false, 1024, + totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE, + (short) numOfDatanodes, 0, false, favoredNodes); + + // start up an empty node with the same capacity + cluster.startDataNodes(conf, 1, true, null, new String[]{RACK2}, + new long[]{CAPACITY}); + + totalCapacity += CAPACITY; + + // run balancer and validate results + TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, + cluster); + + // start rebalancing + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); + assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r); + } + + @Test(timeout = 60000) + public void testBalancerWithSortTopNodes() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 30000); + + final long capacity = 1000L; + final int diffBetweenNodes = 50; + + // Set up the datanodes with two groups: + // 5 over-utilized nodes with 80%, 85%, 90%, 95%, 100% usage + // 2 under-utilizaed nodes with 0%, 5% usage + // With sortTopNodes option, 100% and 95% used ones will be chosen. + final int numOfOverUtilizedDn = 5; + final int numOfUnderUtilizedDn = 2; + final int totalNumOfDn = numOfOverUtilizedDn + numOfUnderUtilizedDn; + final long[] capacityArray = new long[totalNumOfDn]; + Arrays.fill(capacityArray, capacity); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(totalNumOfDn) + .simulatedCapacities(capacityArray) + .build(); + + cluster.setDataNodesDead(); + + List dataNodes = cluster.getDataNodes(); + + // Create top used nodes + for (int i = 0; i < numOfOverUtilizedDn; i++) { + // Bring one node alive + DataNodeTestUtils.triggerHeartbeat(dataNodes.get(i)); + DataNodeTestUtils.triggerBlockReport(dataNodes.get(i)); + // Create nodes with: 80%, 85%, 90%, 95%, 100%. + int capacityForThisDatanode = (int) capacity + - diffBetweenNodes * (numOfOverUtilizedDn - i - 1); + TestBalancer.createFile(cluster, new Path("test_big" + i), + capacityForThisDatanode, (short) 1, 0); + cluster.setDataNodesDead(); + } + + // Create under utilized nodes + for (int i = numOfUnderUtilizedDn - 1; i >= 0; i--) { + int index = i + numOfOverUtilizedDn; + // Bring one node alive + DataNodeTestUtils.triggerHeartbeat(dataNodes.get(index)); + DataNodeTestUtils.triggerBlockReport(dataNodes.get(index)); + // Create nodes with: 5%, 0% + int capacityForThisDatanode = diffBetweenNodes * i; + TestBalancer.createFile(cluster, + new Path("test_small" + i), + capacityForThisDatanode, (short) 1, 0); + cluster.setDataNodesDead(); + } + + // Bring all nodes alive + cluster.triggerHeartbeats(); + cluster.triggerBlockReports(); + cluster.waitFirstBRCompleted(0, 6000); + + final BalancerParameters p = Balancer.Cli.parse(new String[]{ + "-policy", BalancingPolicy.Node.INSTANCE.getName(), + "-threshold", "1", + "-sortTopNodes" + }); + + client = NameNodeProxies.createProxy(conf, + cluster.getFileSystem(0).getUri(), + ClientProtocol.class).getProxy(); + + // Set max-size-to-move to small number + // so only top two nodes will be chosen in one iteration. 
+    conf.setLong(DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY, 99L);
+
+    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
+
+    List<NameNodeConnector> connectors = NameNodeConnector
+        .newNameNodeConnectors(namenodes,
+            Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf,
+            BalancerParameters.DEFAULT.getMaxIdleIteration());
+    final Balancer b = new Balancer(connectors.get(0), p, conf);
+    Balancer.Result balancerResult = b.runOneIteration();
+
+    cluster.triggerDeletionReports();
+    cluster.triggerBlockReports();
+    cluster.triggerHeartbeats();
+
+    DatanodeInfo[] datanodeReport = client
+        .getDatanodeReport(HdfsConstants.DatanodeReportType.ALL);
+
+    long maxUsage = 0;
+    for (int i = 0; i < totalNumOfDn; i++) {
+      maxUsage = Math.max(maxUsage, datanodeReport[i].getDfsUsed());
+    }
+
+    // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file.
+    // The HDFS balancer will choose a block to move from this node randomly. More likely it will
+    // be the 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY, which is 99L,
+    // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B.
+    // However, chances are the first block selected to be moved from this 95% DN is the 50B block.
+    // After this block is moved, the total moved size so far would be 50B, which is smaller than
+    // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block.
+    // The second block will always be of size 100B. So total bytes moved from this 95% DN will be
+    // 2 blocks totalling 150B (100B + 50B).
+    // Hence, the overall total blocks moved by the HDFS balancer will be either of these 2 options:
+    // a) 2 blocks of total size (100B + 100B)
+    // b) 3 blocks of total size (50B + 100B + 100B)
+    assertTrue("BalancerResult is not as expected. " + balancerResult,
+        (balancerResult.getBytesAlreadyMoved() == 200
+            && balancerResult.getBlocksMoved() == 2)
+            || (balancerResult.getBytesAlreadyMoved() == 250
+            && balancerResult.getBlocksMoved() == 3));
+    // 100% and 95% used nodes will be balanced, so top used will be 900
+    assertEquals(900, maxUsage);
+  }
+
+  @Test(timeout = 100000)
+  public void testMaxIterationTime() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    int blockSize = 10 * 1024 * 1024; // 10MB block size
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize);
+    // limit the worker thread count of Balancer to have only 1 queue per DN
+    conf.setInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY, 1);
+    // limit the bandwidth to 4MB per sec to emulate slow block moves
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+        4 * 1024 * 1024);
+    // set the client socket timeout to get an IN_PROGRESS notification back
+    // from the DataNode about the copy every second.
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2000L);
+    // set max iteration time to 500 ms to timeout before moving any block
+    conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 500L);
+    // setup the cluster
+    final long capacity = 10L * blockSize;
+    final long[] dnCapacities = new long[]{capacity, capacity};
+    final short rep = 1;
+    final long seed = 0xFAFAFA;
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0)
+        .build();
+    try {
+      cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+      cluster.startDataNodes(conf, 1, true, null, null, dnCapacities);
+      cluster.waitClusterUp();
+      cluster.waitActive();
+      final Path path = new Path("/testMaxIterationTime.dat");
+      DistributedFileSystem fs = cluster.getFileSystem();
+      // fill the DN to 40%
+      DFSTestUtil.createFile(fs, path, 4L * blockSize, rep, seed);
+      // start a new DN
+      cluster.startDataNodes(conf, 1, true, null, null, dnCapacities);
+      cluster.triggerHeartbeats();
+      // setup Balancer and run one iteration
+      List<NameNodeConnector> connectors = Collections.emptyList();
+      try {
+        BalancerParameters bParams = BalancerParameters.DEFAULT;
+        // set maxIdleIterations to 1 for NO_MOVE_PROGRESS to be
+        // reported when there is no block move
+        connectors = NameNodeConnector.newNameNodeConnectors(
+            DFSUtil.getInternalNsRpcUris(conf), Balancer.class.getSimpleName(),
+            Balancer.BALANCER_ID_PATH, conf, 1);
+        for (NameNodeConnector nnc : connectors) {
+          LOG.info("NNC to work on: " + nnc);
+          Balancer b = new Balancer(nnc, bParams, conf);
+          Balancer.Result r = b.runOneIteration();
+          // Since no block can be moved in 500 milliseconds (i.e.,
+          // 4MB/s * 0.5s = 2MB < 10MB), NO_MOVE_PROGRESS will be reported.
+          // If a block move were not canceled within 500 ms properly and a
+          // block were then moved unexpectedly, IN_PROGRESS would be
+          // reported instead. That is a highly unlikely edge case; see
+          // HDFS-15989.
+          assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.",
+              ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus());
+          assertEquals(0, r.getBlocksMoved());
+        }
+      } finally {
+        for (NameNodeConnector nnc : connectors) {
+          IOUtils.cleanupWithLogger(null, nnc);
+        }
+      }
+    } finally {
+      cluster.shutdown(true, true);
+    }
+  }
+
+}

From 37db0c940fc40b86b93265efda8c6111734da18d Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Wed, 21 Apr 2021 16:06:06 +0100
Subject: [PATCH 250/666] HADOOP-17535. ABFS:
 ITestAzureBlobFileSystemCheckAccess test failure if no oauth key. (#2920)

Contributed by Steve Loughran.
--- .../fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index 2c7b776ebe411..e52071d92e574 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -22,7 +22,6 @@ import java.lang.reflect.Field; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.Assume; import org.junit.Test; @@ -316,8 +315,8 @@ private void checkPrerequisites() throws Exception { private void checkIfConfigIsSet(String configKey){ AbfsConfiguration conf = getConfiguration(); String value = conf.get(configKey); - Preconditions.checkArgument((value != null && value.trim().length() > 1), - configKey + " config is mandatory for the test to run"); + Assume.assumeTrue(configKey + " config is mandatory for the test to run", + value != null && value.trim().length() > 1); } private void assertAccessible(Path testFilePath, FsAction fsAction) From 0e644aa928e5adaf38e88bd96b12dea44620973d Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Thu, 22 Apr 2021 01:02:03 +0530 Subject: [PATCH 251/666] HADOOP-17505. public interface GroupMappingServiceProvider needs default impl for getGroupsSet() (#2661). Contributed by Vinayakumar B. --- .../apache/hadoop/security/GroupMappingServiceProvider.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java index ff6c86d5febf3..3a9073bbffaba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java @@ -18,6 +18,7 @@ package org.apache.hadoop.security; import java.io.IOException; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -61,5 +62,8 @@ public interface GroupMappingServiceProvider { * @return set of group memberships of user * @throws IOException */ - Set getGroupsSet(String user) throws IOException; + default Set getGroupsSet(String user) throws IOException { + //Override to form the set directly to avoid another conversion + return new LinkedHashSet<>(getGroups(user)); + } } From 7c4fbd50bf82c3147b293e241ca2d788ca6b08fb Mon Sep 17 00:00:00 2001 From: zhuxiangyi <82511136+zhuxiangyi@users.noreply.github.com> Date: Fri, 23 Apr 2021 09:19:11 +0800 Subject: [PATCH 252/666] HDFS-15974. 
RBF: Unable to display the datanode UI of the router (#2915) Co-authored-by: zhuxiangyi Reviewed-by: Inigo Goiri Signed-off-by: Takanobu Asanuma --- .../server/federation/metrics/NamenodeBeanMetrics.java | 2 +- .../hdfs/server/federation/router/TestRouterRpc.java | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 45ae81ed063f5..0e6e12a0d3e14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -855,7 +855,7 @@ public long getNumberOfSnapshottableDirs() { @Override public String getEnteringMaintenanceNodes() { - return "N/A"; + return "{}"; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index a7c002566482d..c3e7f10c95a00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -1748,6 +1748,14 @@ public void testRBFMetricsMethodsRelayOnStateStore() { assertEquals(0, metrics.getNumLiveNodes()); } + @Test + public void testNamenodeMetricsEnteringMaintenanceNodes() throws IOException { + final NamenodeBeanMetrics metrics = + router.getRouter().getNamenodeMetrics(); + + assertEquals("{}", metrics.getEnteringMaintenanceNodes()); + } + @Test public void testCacheAdmin() throws Exception { DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS; From e802cc0fa138a2adf69105bbd1f35cbea020f456 Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Fri, 23 Apr 2021 14:58:31 +0530 Subject: [PATCH 253/666] HADOOP-17471. ABFS to collect IOStatistics (#2731) The ABFS Filesystem and its input and output streams now implement the IOStatisticSource interface and provide IOStatistics on their interactions with Azure Storage. This includes the min/max/mean durations of all REST API calls. 
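A rough usage sketch (not part of this patch; the abfs URI and file path are
placeholders) of how a client might read the new statistics through the
IOStatisticsSupport and IOStatisticsLogging helpers that this change already
imports elsewhere:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.IOStatisticsLogging;
import org.apache.hadoop.fs.statistics.IOStatisticsSupport;

public class AbfsIOStatisticsExample {
  public static void main(String[] args) throws Exception {
    // Placeholder account/container/path; replace with real values.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(
        new URI("abfs://container@account.dfs.core.windows.net/"), conf);
    try (FSDataInputStream in = fs.open(new Path("/data/sample.txt"))) {
      in.read();
      // The stream is an IOStatisticsSource; retrieveIOStatistics() is
      // null-safe and returns whatever statistics the stream publishes.
      IOStatistics streamStats = IOStatisticsSupport.retrieveIOStatistics(in);
      System.out.println(
          IOStatisticsLogging.ioStatisticsToPrettyString(streamStats));
    }
    // The filesystem aggregates the counters plus the min/max/mean duration
    // statistics of the HEAD/GET/PUT/PATCH/POST/DELETE requests.
    IOStatistics fsStats = IOStatisticsSupport.retrieveIOStatistics(fs);
    System.out.println(
        IOStatisticsLogging.ioStatisticsToPrettyString(fsStats));
  }
}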
Contributed by Mehakmeet Singh --- .../fs/statistics/StoreStatisticNames.java | 24 +++ .../hadoop/fs/azurebfs/AbfsCountersImpl.java | 141 ++++++------------ .../hadoop/fs/azurebfs/AbfsStatistic.java | 73 ++++++++- .../fs/azurebfs/AzureBlobFileSystem.java | 37 ++++- .../fs/azurebfs/services/AbfsCounters.java | 13 +- .../fs/azurebfs/services/AbfsInputStream.java | 9 +- .../azurebfs/services/AbfsOutputStream.java | 52 +++---- .../azurebfs/services/AbfsRestOperation.java | 27 +++- .../azurebfs/ITestAbfsDurationTrackers.java | 110 ++++++++++++++ .../ITestAbfsInputStreamStatistics.java | 17 ++- .../ITestAbfsOutputStreamStatistics.java | 17 ++- .../fs/azurebfs/ITestAbfsStatistics.java | 26 +++- .../azurebfs/ITestAbfsStreamStatistics.java | 3 +- .../azurebfs/TestAbfsNetworkStatistics.java | 72 +++++++++ 14 files changed, 461 insertions(+), 160 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java index ef04feca6917f..b6b08fe009e15 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java @@ -316,6 +316,30 @@ public final class StoreStatisticNames { public static final String ACTION_HTTP_GET_REQUEST = "action_http_get_request"; + /** + * An HTTP DELETE request was made: {@value}. + */ + public static final String ACTION_HTTP_DELETE_REQUEST + = "action_http_delete_request"; + + /** + * An HTTP PUT request was made: {@value}. + */ + public static final String ACTION_HTTP_PUT_REQUEST + = "action_http_put_request"; + + /** + * An HTTP PATCH request was made: {@value}. + */ + public static final String ACTION_HTTP_PATCH_REQUEST + = "action_http_patch_request"; + + /** + * An HTTP POST request was made: {@value}. + */ + public static final String ACTION_HTTP_POST_REQUEST + = "action_http_post_request"; + /** * An HTTP HEAD request was made: {@value}. 
*/ diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java index 357f53b611d3c..a86913a007980 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java @@ -19,24 +19,23 @@ package org.apache.hadoop.fs.azurebfs; import java.net.URI; -import java.util.HashMap; import java.util.Map; import java.util.UUID; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; -import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStoreBuilder; import org.apache.hadoop.metrics2.MetricStringBuilder; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableMetric; import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.*; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; /** * Instrumentation of Abfs counters. @@ -62,6 +61,8 @@ public class AbfsCountersImpl implements AbfsCounters { private final MetricsRegistry registry = new MetricsRegistry("abfsMetrics").setContext(CONTEXT); + private final IOStatisticsStore ioStatisticsStore; + private static final AbfsStatistic[] STATISTIC_LIST = { CALL_CREATE, CALL_OPEN, @@ -85,7 +86,17 @@ public class AbfsCountersImpl implements AbfsCounters { BYTES_SENT, BYTES_RECEIVED, READ_THROTTLES, - WRITE_THROTTLES + WRITE_THROTTLES, + SERVER_UNAVAILABLE + }; + + private static final AbfsStatistic[] DURATION_TRACKER_LIST = { + HTTP_HEAD_REQUEST, + HTTP_GET_REQUEST, + HTTP_DELETE_REQUEST, + HTTP_PUT_REQUEST, + HTTP_PATCH_REQUEST, + HTTP_POST_REQUEST }; public AbfsCountersImpl(URI uri) { @@ -95,9 +106,17 @@ public AbfsCountersImpl(URI uri) { fileSystemInstanceId.toString()); registry.tag(METRIC_BUCKET, "Hostname from the FS URL", uri.getHost()); + IOStatisticsStoreBuilder ioStatisticsStoreBuilder = iostatisticsStore(); + // Declaring the counters. for (AbfsStatistic stats : STATISTIC_LIST) { + ioStatisticsStoreBuilder.withCounters(stats.getStatName()); createCounter(stats); } + // Declaring the DurationTrackers. + for (AbfsStatistic durationStats : DURATION_TRACKER_LIST) { + ioStatisticsStoreBuilder.withDurationTracking(durationStats.getStatName()); + } + ioStatisticsStore = ioStatisticsStoreBuilder.build(); } /** @@ -149,6 +168,7 @@ private MutableCounterLong createCounter(AbfsStatistic stats) { */ @Override public void incrementCounter(AbfsStatistic statistic, long value) { + ioStatisticsStore.incrementCounter(statistic.getStatName(), value); MutableCounterLong counter = lookupCounter(statistic.getStatName()); if (counter != null) { counter.incr(value); @@ -189,98 +209,35 @@ public String formString(String prefix, String separator, String suffix, /** * {@inheritDoc} * - * Creating a map of all the counters for testing. 
+ * Map of all the counters for testing. * - * @return a map of the metrics. + * @return a map of the IOStatistics counters. */ @VisibleForTesting @Override public Map toMap() { - MetricsToMap metricBuilder = new MetricsToMap(null); - registry.snapshot(metricBuilder, true); - return metricBuilder.getMap(); + return ioStatisticsStore.counters(); } - protected static class MetricsToMap extends MetricsRecordBuilder { - private final MetricsCollector parent; - private final Map map = - new HashMap<>(); - - MetricsToMap(MetricsCollector parent) { - this.parent = parent; - } - - @Override - public MetricsRecordBuilder tag(MetricsInfo info, String value) { - return this; - } - - @Override - public MetricsRecordBuilder add(MetricsTag tag) { - return this; - } - - @Override - public MetricsRecordBuilder add(AbstractMetric metric) { - return this; - } - - @Override - public MetricsRecordBuilder setContext(String value) { - return this; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo info, int value) { - return tuple(info, value); - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo info, long value) { - return tuple(info, value); - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo info, int value) { - return tuple(info, value); - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo info, long value) { - return tuple(info, value); - } - - public MetricsToMap tuple(MetricsInfo info, long value) { - return tuple(info.name(), value); - } - - public MetricsToMap tuple(String name, long value) { - map.put(name, value); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo info, float value) { - return tuple(info, (long) value); - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo info, double value) { - return tuple(info, (long) value); - } - - @Override - public MetricsCollector parent() { - return parent; - } + /** + * Returning the instance of IOStatisticsStore used to collect the metrics + * in AbfsCounters. + * + * @return instance of IOStatistics. + */ + @Override + public IOStatistics getIOStatistics() { + return ioStatisticsStore; + } - /** - * Get the map. - * - * @return the map of metrics. - */ - public Map getMap() { - return map; - } + /** + * Tracks the duration of a statistic. + * + * @param key name of the statistic. + * @return DurationTracker for that statistic. + */ + @Override + public DurationTracker trackDuration(String key) { + return ioStatisticsStore.trackDuration(key); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java index 2935cd754315d..bb65b0c902120 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java @@ -18,7 +18,12 @@ package org.apache.hadoop.fs.azurebfs; +import java.util.HashMap; +import java.util.Map; + import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; /** * Statistic which are collected in Abfs. 
@@ -73,11 +78,45 @@ public enum AbfsStatistic { READ_THROTTLES("read_throttles", "Total number of times a read operation is throttled."), WRITE_THROTTLES("write_throttles", - "Total number of times a write operation is throttled."); + "Total number of times a write operation is throttled."), + SERVER_UNAVAILABLE("server_unavailable", + "Total number of times HTTP 503 status code is received in response."), + + // HTTP Duration Trackers + HTTP_HEAD_REQUEST(StoreStatisticNames.ACTION_HTTP_HEAD_REQUEST, + "Time taken to complete a HEAD request", + AbfsHttpConstants.HTTP_METHOD_HEAD), + HTTP_GET_REQUEST(StoreStatisticNames.ACTION_HTTP_GET_REQUEST, + "Time taken to complete a GET request", + AbfsHttpConstants.HTTP_METHOD_GET), + HTTP_DELETE_REQUEST(StoreStatisticNames.ACTION_HTTP_DELETE_REQUEST, + "Time taken to complete a DELETE request", + AbfsHttpConstants.HTTP_METHOD_DELETE), + HTTP_PUT_REQUEST(StoreStatisticNames.ACTION_HTTP_PUT_REQUEST, + "Time taken to complete a PUT request", + AbfsHttpConstants.HTTP_METHOD_PUT), + HTTP_PATCH_REQUEST(StoreStatisticNames.ACTION_HTTP_PATCH_REQUEST, + "Time taken to complete a PATCH request", + AbfsHttpConstants.HTTP_METHOD_PATCH), + HTTP_POST_REQUEST(StoreStatisticNames.ACTION_HTTP_POST_REQUEST, + "Time taken to complete a POST request", + AbfsHttpConstants.HTTP_METHOD_POST); private String statName; private String statDescription; + //For http call stats only. + private String httpCall; + private static final Map HTTP_CALL_TO_NAME_MAP = new HashMap<>(); + + static { + for (AbfsStatistic statistic : values()) { + if (statistic.getHttpCall() != null) { + HTTP_CALL_TO_NAME_MAP.put(statistic.getHttpCall(), statistic.getStatName()); + } + } + } + /** * Constructor of AbfsStatistic to set statistic name and description. * @@ -89,6 +128,19 @@ public enum AbfsStatistic { this.statDescription = statDescription; } + /** + * Constructor for AbfsStatistic for HTTP durationTrackers. + * + * @param statName Name of the statistic. + * @param statDescription Description of the statistic. + * @param httpCall HTTP call associated with the stat name. + */ + AbfsStatistic(String statName, String statDescription, String httpCall) { + this.statName = statName; + this.statDescription = statDescription; + this.httpCall = httpCall; + } + /** * Getter for statistic name. * @@ -106,4 +158,23 @@ public String getStatName() { public String getStatDescription() { return statDescription; } + + /** + * Getter for http call for HTTP duration trackers. + * + * @return http call of a statistic. + */ + public String getHttpCall() { + return httpCall; + } + + /** + * Get the statistic name using the http call name. + * + * @param httpCall The HTTP call used to get the statistic name. + * @return Statistic name. 
+ */ + public static String getStatNameFromHttpCall(String httpCall) { + return HTTP_CALL_TO_NAME_MAP.get(httpCall); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 30108ed1e2fb3..2a8d58678a203 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -82,6 +82,8 @@ import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; @@ -93,13 +95,15 @@ import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.*; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; /** * A {@link org.apache.hadoop.fs.FileSystem} for reading and writing files stored on Windows Azure */ @InterfaceStability.Evolving -public class AzureBlobFileSystem extends FileSystem { +public class AzureBlobFileSystem extends FileSystem + implements IOStatisticsSource { public static final Logger LOG = LoggerFactory.getLogger(AzureBlobFileSystem.class); private URI uri; private Path workingDir; @@ -162,11 +166,8 @@ public String toString() { sb.append("uri=").append(uri); sb.append(", user='").append(abfsStore.getUser()).append('\''); sb.append(", primaryUserGroup='").append(abfsStore.getPrimaryGroup()).append('\''); - if (abfsCounters != null) { - sb.append(", Statistics: {").append(abfsCounters.formString("{", "=", - "}", true)); - sb.append("}"); - } + sb.append(", \nIOStatistics: {").append(ioStatisticsToString(getIOStatistics())); + sb.append("}"); sb.append('}'); return sb.toString(); } @@ -425,7 +426,9 @@ private void statIncrement(AbfsStatistic statistic) { * @param statistic the Statistic to be incremented. */ private void incrementStatistic(AbfsStatistic statistic) { - abfsCounters.incrementCounter(statistic, 1); + if (abfsCounters != null) { + abfsCounters.incrementCounter(statistic, 1); + } } /** @@ -489,7 +492,9 @@ public synchronized void close() throws IOException { LOG.debug("AzureBlobFileSystem.close"); IOUtils.cleanupWithLogger(LOG, abfsStore, delegationTokenManager); this.isClosed = true; - LOG.debug("Closing Abfs: " + toString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Closing Abfs: {}", toString()); + } } @Override @@ -1311,6 +1316,12 @@ boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { return abfsStore.getIsNamespaceEnabled(); } + /** + * Returns the counter() map in IOStatistics containing all the counters + * and their values. + * + * @return Map of IOStatistics counters. + */ @VisibleForTesting Map getInstrumentationMap() { return abfsCounters.toMap(); @@ -1331,4 +1342,14 @@ public boolean hasPathCapability(final Path path, final String capability) return super.hasPathCapability(p, capability); } } + + /** + * Getter for IOStatistic instance in AzureBlobFilesystem. + * + * @return the IOStatistic instance from abfsCounters. 
+ */ + @Override + public IOStatistics getIOStatistics() { + return abfsCounters != null ? abfsCounters.getIOStatistics() : null; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java index 73996f5df8857..2dac63b166adc 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java @@ -25,13 +25,16 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.azurebfs.AbfsStatistic; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; /** * An interface for Abfs counters. */ @InterfaceAudience.Private @InterfaceStability.Unstable -public interface AbfsCounters { +public interface AbfsCounters extends IOStatisticsSource, DurationTrackerFactory { /** * Increment a AbfsStatistic by a long value. @@ -63,4 +66,12 @@ String formString(String prefix, String separator, String suffix, @VisibleForTesting Map toMap(); + /** + * Start a DurationTracker for a request. + * + * @param key Name of the DurationTracker statistic. + * @return an instance of DurationTracker. + */ + @Override + DurationTracker trackDuration(String key); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index 0dd3dcf065fbe..c05ba0d9472aa 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -39,9 +39,6 @@ import org.apache.hadoop.fs.azurebfs.utils.CachedSASToken; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.IOStatisticsSource; -import org.apache.hadoop.fs.statistics.StoreStatisticNames; -import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; -import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; import static java.lang.Math.max; import static java.lang.Math.min; @@ -485,10 +482,8 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti AbfsPerfTracker tracker = client.getAbfsPerfTracker(); try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "readRemote", "read")) { LOG.trace("Trigger client.read for path={} position={} offset={} length={}", path, position, offset, length); - op = IOStatisticsBinding.trackDuration((IOStatisticsStore) ioStatistics, - StoreStatisticNames.ACTION_HTTP_GET_REQUEST, - () -> client.read(path, position, b, offset, length, - tolerateOobAppends ? "*" : eTag, cachedSasToken.get())); + op = client.read(path, position, b, offset, length, + tolerateOobAppends ? 
"*" : eTag, cachedSasToken.get()); cachedSasToken.update(op.getSasToken()); if (streamStatistics != null) { streamStatistics.remoteReadOperation(); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 80b35ee4d3a91..110764c5dfbd7 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -45,9 +45,6 @@ import org.apache.hadoop.fs.statistics.DurationTracker; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.IOStatisticsSource; -import org.apache.hadoop.fs.statistics.StreamStatisticNames; -import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; -import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; import org.apache.hadoop.io.ElasticByteBufferPool; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FSExceptionMessages; @@ -450,32 +447,29 @@ private synchronized void writeCurrentBufferToService(boolean isFlush, boolean i } } final Future job = - completionService.submit(IOStatisticsBinding - .trackDurationOfCallable((IOStatisticsStore) ioStatistics, - StreamStatisticNames.TIME_SPENT_ON_PUT_REQUEST, - () -> { - AbfsPerfTracker tracker = client.getAbfsPerfTracker(); - try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, - "writeCurrentBufferToService", "append")) { - AppendRequestParameters.Mode - mode = APPEND_MODE; - if (isFlush & isClose) { - mode = FLUSH_CLOSE_MODE; - } else if (isFlush) { - mode = FLUSH_MODE; - } - AppendRequestParameters reqParams = new AppendRequestParameters( - offset, 0, bytesLength, mode, false, leaseId); - AbfsRestOperation op = client.append(path, bytes, reqParams, - cachedSasToken.get()); - cachedSasToken.update(op.getSasToken()); - perfInfo.registerResult(op.getResult()); - byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); - perfInfo.registerSuccess(true); - return null; - } - }) - ); + completionService.submit(() -> { + AbfsPerfTracker tracker = + client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, + "writeCurrentBufferToService", "append")) { + AppendRequestParameters.Mode + mode = APPEND_MODE; + if (isFlush & isClose) { + mode = FLUSH_CLOSE_MODE; + } else if (isFlush) { + mode = FLUSH_MODE; + } + AppendRequestParameters reqParams = new AppendRequestParameters( + offset, 0, bytesLength, mode, false, leaseId); + AbfsRestOperation op = client.append(path, bytes, reqParams, + cachedSasToken.get()); + cachedSasToken.update(op.getSasToken()); + perfInfo.registerResult(op.getResult()); + byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); + perfInfo.registerSuccess(true); + return null; + } + }); if (outputStreamStatistics != null) { if (job.isCancelled()) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index b046cbc03a30b..4c24c37a0dfb0 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -19,12 +19,12 @@ package org.apache.hadoop.fs.azurebfs.services; import java.io.IOException; 
+import java.io.UncheckedIOException; import java.net.HttpURLConnection; import java.net.URL; import java.net.UnknownHostException; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidAbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; /** * The AbfsRestOperation for Rest AbfsClient. @@ -167,12 +168,30 @@ String getSasToken() { this.abfsCounters = client.getAbfsCounters(); } + /** + * Execute a AbfsRestOperation. Track the Duration of a request if + * abfsCounters isn't null. + * + */ + public void execute() throws AzureBlobFileSystemException { + + try { + IOStatisticsBinding.trackDurationOfInvocation(abfsCounters, + AbfsStatistic.getStatNameFromHttpCall(method), + () -> completeExecute()); + } catch (AzureBlobFileSystemException aze) { + throw aze; + } catch (IOException e) { + throw new UncheckedIOException("Error while tracking Duration of an " + + "AbfsRestOperation call", e); + } + } + /** * Executes the REST operation with retry, by issuing one or more * HTTP operations. */ - @VisibleForTesting - public void execute() throws AzureBlobFileSystemException { + private void completeExecute() throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests String latencyHeader = this.client.getAbfsPerfTracker().getClientLatency(); if (latencyHeader != null && !latencyHeader.isEmpty()) { @@ -259,6 +278,8 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS && httpOperation.getStatusCode() <= HttpURLConnection.HTTP_PARTIAL) { incrementCounter(AbfsStatistic.BYTES_RECEIVED, httpOperation.getBytesReceived()); + } else if (httpOperation.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) { + incrementCounter(AbfsStatistic.SERVER_UNAVAILABLE, 1); } } catch (UnknownHostException ex) { String hostname = null; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java new file mode 100644 index 0000000000000..c8b687d233cb5 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; +import org.apache.hadoop.io.IOUtils; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.HTTP_DELETE_REQUEST; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.HTTP_GET_REQUEST; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.HTTP_HEAD_REQUEST; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.HTTP_PUT_REQUEST; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupMeanStatistic; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; + +public class ITestAbfsDurationTrackers extends AbstractAbfsIntegrationTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsDurationTrackers.class); + private static final AbfsStatistic[] HTTP_DURATION_TRACKER_LIST = { + HTTP_HEAD_REQUEST, + HTTP_GET_REQUEST, + HTTP_DELETE_REQUEST, + HTTP_PUT_REQUEST, + }; + + public ITestAbfsDurationTrackers() throws Exception { + } + + /** + * Test to check if DurationTrackers for Abfs HTTP calls work correctly and + * track the duration of the http calls. + */ + @Test + public void testAbfsHttpCallsDurations() throws IOException { + describe("test to verify if the DurationTrackers for abfs http calls " + + "work as expected."); + + AzureBlobFileSystem fs = getFileSystem(); + Path testFilePath = path(getMethodName()); + + // Declaring output and input stream. + AbfsOutputStream out = null; + AbfsInputStream in = null; + try { + // PUT the file. + out = createAbfsOutputStreamWithFlushEnabled(fs, testFilePath); + out.write('a'); + out.hflush(); + + // GET the file. + in = fs.getAbfsStore().openFileForRead(testFilePath, fs.getFsStatistics()); + int res = in.read(); + LOG.info("Result of Read: {}", res); + + // DELETE the file. + fs.delete(testFilePath, false); + + // extract the IOStatistics from the filesystem. + IOStatistics ioStatistics = extractStatistics(fs); + LOG.info(ioStatisticsToPrettyString(ioStatistics)); + assertDurationTracker(ioStatistics); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * A method to assert that all the DurationTrackers for the http calls are + * working correctly. + * + * @param ioStatistics the IOStatisticsSource in use. 
+ */ + private void assertDurationTracker(IOStatistics ioStatistics) { + for (AbfsStatistic abfsStatistic : HTTP_DURATION_TRACKER_LIST) { + Assertions.assertThat(lookupMeanStatistic(ioStatistics, + abfsStatistic.getStatName() + StoreStatisticNames.SUFFIX_MEAN).mean()) + .describedAs("The DurationTracker Named " + abfsStatistic.getStatName() + + " Doesn't match the expected value.") + .isGreaterThan(0.0); + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java index a33a76ecefe77..4d47d5a6ed0c1 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java @@ -31,8 +31,14 @@ import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; import org.apache.hadoop.io.IOUtils; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupMeanStatistic; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; + public class ITestAbfsInputStreamStatistics extends AbstractAbfsIntegrationTest { private static final int OPERATIONS = 10; @@ -386,12 +392,13 @@ public void testActionHttpGetRequest() throws IOException { abfsInputStream = abfss.openFileForRead(actionHttpGetRequestPath, fs.getFsStatistics()); abfsInputStream.read(); - AbfsInputStreamStatisticsImpl abfsInputStreamStatistics = - (AbfsInputStreamStatisticsImpl) abfsInputStream.getStreamStatistics(); - - LOG.info("AbfsInputStreamStats info: {}", abfsInputStreamStatistics.toString()); + IOStatistics ioStatistics = extractStatistics(fs); + LOG.info("AbfsInputStreamStats info: {}", + ioStatisticsToPrettyString(ioStatistics)); Assertions.assertThat( - abfsInputStreamStatistics.getActionHttpGetRequest()) + lookupMeanStatistic(ioStatistics, + AbfsStatistic.HTTP_GET_REQUEST.getStatName() + + StoreStatisticNames.SUFFIX_MEAN).mean()) .describedAs("Mismatch in time taken by a GET request") .isGreaterThan(0.0); } finally { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java index 392e80a0a782c..8be997ce69cf3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java @@ -28,6 +28,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; + +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupMeanStatistic; +import static 
org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; /** * Test AbfsOutputStream statistics. @@ -241,10 +247,13 @@ public void testAbfsOutputStreamDurationTrackerPutRequest() throws IOException { outputStream.write('a'); outputStream.hflush(); - AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = - getAbfsOutputStreamStatistics(outputStream); - LOG.info("AbfsOutputStreamStats info: {}", abfsOutputStreamStatistics.toString()); - Assertions.assertThat(abfsOutputStreamStatistics.getTimeSpentOnPutRequest()) + IOStatistics ioStatistics = extractStatistics(fs); + LOG.info("AbfsOutputStreamStats info: {}", + ioStatisticsToPrettyString(ioStatistics)); + Assertions.assertThat( + lookupMeanStatistic(ioStatistics, + AbfsStatistic.HTTP_PUT_REQUEST.getStatName() + + StoreStatisticNames.SUFFIX_MEAN).mean()) .describedAs("Mismatch in timeSpentOnPutRequest DurationTracker") .isGreaterThan(0.0); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index 42205807c1b3e..ccfc9f45392f9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.statistics.IOStatistics; /** * Tests AzureBlobFileSystem Statistics. @@ -46,14 +47,21 @@ public void testInitialStatsValues() throws IOException { AbfsCounters abfsCounters = new AbfsCountersImpl(getFileSystem().getUri()); - Map metricMap = abfsCounters.toMap(); + IOStatistics ioStatistics = abfsCounters.getIOStatistics(); - for (Map.Entry entry : metricMap.entrySet()) { - String key = entry.getKey(); - Long value = entry.getValue(); + //Initial value verification for counters + for (Map.Entry entry : ioStatistics.counters().entrySet()) { + checkInitialValue(entry.getKey(), entry.getValue(), 0); + } + + //Initial value verification for gauges + for (Map.Entry entry : ioStatistics.gauges().entrySet()) { + checkInitialValue(entry.getKey(), entry.getValue(), 0); + } - //Verify if initial value of statistic is 0. - checkInitialValue(key, value); + //Initial value verifications for DurationTrackers + for (Map.Entry entry : ioStatistics.maximums().entrySet()) { + checkInitialValue(entry.getKey(), entry.getValue(), -1); } } @@ -251,8 +259,10 @@ Testing exists() calls and rename calls. Since both were called 2 * * @param statName name of the statistic to be checked. * @param statValue value of the statistic. + * @param expectedInitialValue initial value expected from this statistic. 
*/ - private void checkInitialValue(String statName, long statValue) { - assertEquals("Mismatch in " + statName, 0, statValue); + private void checkInitialValue(String statName, long statValue, + long expectedInitialValue) { + assertEquals("Mismatch in " + statName, expectedInitialValue, statValue); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index 395a456124bdf..7eadb4bb8ff23 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -39,13 +39,12 @@ public ITestAbfsStreamStatistics() throws Exception { private static final Logger LOG = LoggerFactory.getLogger(ITestAbfsStreamStatistics.class); - private static final int LARGE_NUMBER_OF_OPS = 999999; + private static final int LARGE_NUMBER_OF_OPS = 99; /*** * Testing {@code incrementReadOps()} in class {@code AbfsInputStream} and * {@code incrementWriteOps()} in class {@code AbfsOutputStream}. * - * @throws Exception */ @Test public void testAbfsStreamOps() throws Exception { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java index 0639cf2f82b9a..628ad30863c9a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java @@ -21,13 +21,31 @@ import java.io.IOException; import java.util.Map; +import org.assertj.core.api.Assertions; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.HTTP_PATCH_REQUEST; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.HTTP_POST_REQUEST; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupMeanStatistic; public class TestAbfsNetworkStatistics extends AbstractAbfsIntegrationTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestAbfsNetworkStatistics.class); private static final int LARGE_OPERATIONS = 1000; + private static final AbfsStatistic[] HTTP_DURATION_TRACKER_LIST = { + HTTP_POST_REQUEST, + HTTP_PATCH_REQUEST + }; public TestAbfsNetworkStatistics() throws Exception { } @@ -64,4 +82,58 @@ public void testAbfsThrottlingStatistics() throws IOException { assertAbfsStatistics(AbfsStatistic.WRITE_THROTTLES, LARGE_OPERATIONS, metricMap); } + + /** + * Test to check if the DurationTrackers are tracking as expected whilst + * doing some work. 
+ */ + @Test + public void testAbfsNetworkDurationTrackers() + throws IOException, InterruptedException { + describe("Test to verify the actual values of DurationTrackers are " + + "greater than 0.0 while tracking some work."); + + AbfsCounters abfsCounters = new AbfsCountersImpl(getFileSystem().getUri()); + // Start dummy work for the DurationTrackers and start tracking. + try (DurationTracker ignoredPatch = + abfsCounters.trackDuration(AbfsStatistic.getStatNameFromHttpCall(AbfsHttpConstants.HTTP_METHOD_PATCH)); + DurationTracker ignoredPost = + abfsCounters.trackDuration(AbfsStatistic.getStatNameFromHttpCall(AbfsHttpConstants.HTTP_METHOD_POST)) + ) { + // Emulates doing some work. + Thread.sleep(10); + LOG.info("Execute some Http requests..."); + } + + // Extract the iostats from the abfsCounters instance. + IOStatistics ioStatistics = extractStatistics(abfsCounters); + // Asserting that the durationTrackers have mean > 0.0. + for (AbfsStatistic abfsStatistic : HTTP_DURATION_TRACKER_LIST) { + Assertions.assertThat(lookupMeanStatistic(ioStatistics, + abfsStatistic.getStatName() + StoreStatisticNames.SUFFIX_MEAN).mean()) + .describedAs("The DurationTracker Named " + abfsStatistic.getStatName() + + " Doesn't match the expected value") + .isGreaterThan(0.0); + } + } + + /** + * Test to check if abfs counter for HTTP 503 statusCode works correctly + * when incremented. + */ + @Test + public void testAbfsHTTP503ErrorCounter() throws IOException { + describe("tests to verify the expected value of the HTTP 503 error " + + "counter is equal to number of times incremented."); + + AbfsCounters abfsCounters = new AbfsCountersImpl(getFileSystem().getUri()); + // Incrementing the server_unavailable counter. + for (int i = 0; i < LARGE_OPERATIONS; i++) { + abfsCounters.incrementCounter(AbfsStatistic.SERVER_UNAVAILABLE, 1); + } + // Getting the IOStatistics counter map from abfsCounters. + Map metricsMap = abfsCounters.toMap(); + assertAbfsStatistics(AbfsStatistic.SERVER_UNAVAILABLE, LARGE_OPERATIONS, + metricsMap); + } } From 28337bfe7a9b15f1e81ae56bc315e26b4ce3ba18 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 23 Apr 2021 19:52:41 +0900 Subject: [PATCH 254/666] HADOOP-17655. Upgrade Jetty to 9.4.40. (#2947) Reviewed-by: Wei-Chiu Chuang --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e950ed9c6f208..e76a1d65f2ea9 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -37,7 +37,7 @@ true true - 9.4.39.v20210325 + 9.4.40.v20210413 _ _ From 10834036baa29939a8e4ae1726abeef5e1561062 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 23 Apr 2021 16:07:58 +0200 Subject: [PATCH 255/666] YARN-10654. Dots '.' in CSMappingRule path variables should be replaced. 
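Because '.' separates queue levels in a capacity-scheduler queue path, user and
group names such as test.user or main.grp cannot be used verbatim when a
mapping rule builds a queue name from %user, %primary_group or
%secondary_group. A minimal sketch of the substitution (illustrative only; the
authoritative logic is the cleanName() method added below, and the sample names
come from the new tests):

// Mirrors the DOT -> "_dot_" conversion applied to the variable values
// before they are resolved into a queue path.
static String cleanName(String name) {
  // Replace every '.' so the name can no longer be read as a queue path.
  return name.contains(".") ? name.replaceAll("\\.", "_dot_") : name;
}

// cleanName("test.user") -> "test_dot_user"  => placed in root.user.test_dot_user
// cleanName("main.grp")  -> "main_dot_grp"   => placed in root.groups.main_dot_grp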
Contributed by Peter Bacsko --- .../placement/CSMappingPlacementRule.java | 22 ++++-- .../TestCSMappingPlacementRule.java | 67 +++++++++++++++++-- 2 files changed, 77 insertions(+), 12 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java index 894bc8262221a..9648769231c96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/CSMappingPlacementRule.java @@ -42,8 +42,6 @@ import java.util.List; import java.util.Set; -import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; - /** * This class is responsible for making application submissions to queue * assignments, based on the configured ruleset. This class supports all @@ -55,6 +53,8 @@ public class CSMappingPlacementRule extends PlacementRule { private static final Logger LOG = LoggerFactory .getLogger(CSMappingPlacementRule.class); + private static final String DOT = "."; + private static final String DOT_REPLACEMENT = "_dot_"; private CapacitySchedulerQueueManager queueManager; private List mappingRules; @@ -194,12 +194,13 @@ private void setupGroupsForVariableContext(VariableContext vctx, String user) return; } Iterator it = groupsSet.iterator(); - String primaryGroup = it.next(); + String primaryGroup = cleanName(it.next()); ArrayList secondaryGroupList = new ArrayList<>(); while (it.hasNext()) { - secondaryGroupList.add(it.next()); + String groupName = cleanName(it.next()); + secondaryGroupList.add(groupName); } if (secondaryGroupList.size() == 0) { @@ -226,7 +227,7 @@ private VariableContext createVariableContext( ApplicationSubmissionContext asc, String user) { VariableContext vctx = new VariableContext(); - vctx.put("%user", user); + vctx.put("%user", cleanName(user)); //If the specified matches the default it means NO queue have been specified //as per ClientRMService#submitApplication which sets the queue to default //when no queue is provided. @@ -522,4 +523,15 @@ private ApplicationPlacementContext placeToDefault( " mapping rule. 
Please see the logs for details"); } } + + private String cleanName(String name) { + if (name.contains(DOT)) { + String converted = name.replaceAll("\\.", DOT_REPLACEMENT); + LOG.warn("Name {} is converted to {} when it is used as a queue name.", + name, converted); + return converted; + } else { + return name; + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java index f6bb2a1587836..0cf10595ee9dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestCSMappingPlacementRule.java @@ -63,13 +63,15 @@ public class TestCSMappingPlacementRule { @Rule public TemporaryFolder folder = new TemporaryFolder(); - private Map> userGroups = ImmutableMap.of( - "alice", ImmutableSet.of("p_alice", "unique", "user"), - "bob", ImmutableSet.of("p_bob", "user", "developer"), - "charlie", ImmutableSet.of("p_charlie", "user", "tester"), - "dave", ImmutableSet.of("user"), - "emily", ImmutableSet.of("user", "tester", "developer") - ); + private Map> userGroups = + ImmutableMap.>builder() + .put("alice", ImmutableSet.of("p_alice", "unique", "user")) + .put("bob", ImmutableSet.of("p_bob", "user", "developer")) + .put("charlie", ImmutableSet.of("p_charlie", "user", "tester")) + .put("dave", ImmutableSet.of("user")) + .put("emily", ImmutableSet.of("user", "tester", "developer")) + .put("test.user", ImmutableSet.of("main.grp", "sec.test.grp")) + .build(); private void createQueueHierarchy(CapacitySchedulerQueueManager queueManager) { MockQueueHierarchyBuilder.create() @@ -79,6 +81,9 @@ private void createQueueHierarchy(CapacitySchedulerQueueManager queueManager) { .withManagedParentQueue("root.man") .withQueue("root.user.alice") .withQueue("root.user.bob") + .withQueue("root.user.test_dot_user") + .withQueue("root.groups.main_dot_grp") + .withQueue("root.groups.sec_dot_test_dot_grp") .withQueue("root.secondaryTests.unique") .withQueue("root.secondaryTests.user") .withQueue("root.ambiguous.user.charlie") @@ -718,4 +723,52 @@ public void testJSONFileConfiguration() throws IOException { assertConfigTestResult(rules); } + + @Test + public void testUserNameCleanup() throws IOException { + ArrayList rules = new ArrayList<>(); + rules.add( + new MappingRule( + MappingRuleMatchers.createAllMatcher(), + (new MappingRuleActions.PlaceToQueueAction("%user", true)) + .setFallbackReject())); + + CSMappingPlacementRule engine = setupEngine(true, rules); + ApplicationSubmissionContext app = createApp("app"); + assertPlace( + "test.user should be placed to root.users.test_dot_user", + engine, app, "test.user", "root.user.test_dot_user"); + } + + @Test + public void testPrimaryGroupNameCleanup() throws IOException { + ArrayList rules = new ArrayList<>(); + rules.add( + new MappingRule( + MappingRuleMatchers.createAllMatcher(), + (new MappingRuleActions.PlaceToQueueAction("%primary_group", true)) + .setFallbackReject())); + + 
CSMappingPlacementRule engine = setupEngine(true, rules); + ApplicationSubmissionContext app = createApp("app"); + assertPlace( + "Application should have been placed to root.groups.main_dot_grp", + engine, app, "test.user", "root.groups.main_dot_grp"); + } + + @Test + public void testSecondaryGroupNameCleanup() throws IOException { + ArrayList rules = new ArrayList<>(); + rules.add( + new MappingRule( + MappingRuleMatchers.createAllMatcher(), + (new MappingRuleActions.PlaceToQueueAction("%secondary_group", true)) + .setFallbackReject())); + + CSMappingPlacementRule engine = setupEngine(true, rules); + ApplicationSubmissionContext app = createApp("app"); + assertPlace( + "Application should have been placed to root.groups.sec_dot_test_dot_grp", + engine, app, "test.user", "root.groups.sec_dot_test_dot_grp"); + } } \ No newline at end of file From a7d0b74f8f4bbd14c80b17a36b6ca9954d0fa0ae Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 23 Apr 2021 16:12:12 +0200 Subject: [PATCH 256/666] YARN-10746. RmWebApp add default-node-label-expression to the queue info. Contributed by Gergely Pollak --- .../webapp/dao/CapacitySchedulerInfo.java | 3 ++ .../dao/CapacitySchedulerLeafQueueInfo.java | 12 ++--- .../dao/CapacitySchedulerQueueInfo.java | 8 ++- .../TestRMWebServicesCapacitySched.java | 52 +++++++++++++++++++ 4 files changed, 65 insertions(+), 10 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java index fc18d4d6e4428..78e2133a90b2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java @@ -61,6 +61,7 @@ public class CapacitySchedulerInfo extends SchedulerInfo { protected String queueType; protected String creationMethod; protected String autoCreationEligibility; + protected String defaultNodeLabelExpression; @XmlTransient static final float EPSILON = 1e-8f; @@ -114,6 +115,8 @@ public CapacitySchedulerInfo(CSQueue parent, CapacityScheduler cs) { creationMethod = CapacitySchedulerInfoHelper.getCreationMethod(parent); autoCreationEligibility = CapacitySchedulerInfoHelper .getAutoCreationEligibility(parent); + + defaultNodeLabelExpression = parent.getDefaultNodeLabelExpression(); } public float getCapacity() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java index 6aef01bb3dd1c..16abb991df90d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java @@ -52,7 +52,6 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo { protected ResourceInfo userAMResourceLimit; protected boolean preemptionDisabled; protected boolean intraQueuePreemptionDisabled; - protected String defaultNodeLabelExpression; protected int defaultPriority; protected boolean isAutoCreatedLeafQueue; protected long maxApplicationLifetime; @@ -81,7 +80,6 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo { intraQueuePreemptionDisabled = q.getIntraQueuePreemptionDisabled(); orderingPolicyDisplayName = q.getOrderingPolicy().getInfo(); orderingPolicyInfo = q.getOrderingPolicy().getConfigName(); - defaultNodeLabelExpression = q.getDefaultNodeLabelExpression(); defaultPriority = q.getDefaultApplicationPriority().getPriority(); ArrayList usersList = users.getUsersList(); if (usersList.isEmpty()) { @@ -148,17 +146,17 @@ public float getUserLimitFactor() { public float getConfiguredMaxAMResourceLimit() { return configuredMaxAMResourceLimit; } - + public ResourceInfo getAMResourceLimit() { return AMResourceLimit; } - + public ResourceInfo getUsedAMResource() { return usedAMResource; } public ResourceInfo getUserAMResourceLimit() { - return userAMResourceLimit; + return userAMResourceLimit; } public boolean getPreemptionDisabled() { @@ -172,10 +170,6 @@ public boolean getIntraQueuePreemptionDisabled() { public String getOrderingPolicyDisplayName() { return orderingPolicyDisplayName; } - - public String getDefaultNodeLabelExpression() { - return defaultNodeLabelExpression; - } public int getDefaultApplicationPriority() { return defaultPriority; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index 3865cce909f5b..31857bb94b6b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -91,6 +91,7 @@ public class CapacitySchedulerQueueInfo { protected String queueType; protected String creationMethod; protected String autoCreationEligibility; + protected String defaultNodeLabelExpression; CapacitySchedulerQueueInfo() { }; @@ -120,6 +121,7 @@ public class CapacitySchedulerQueueInfo { reservedContainers = q.getMetrics().getReservedContainers(); queueName = q.getQueueName(); state = q.getState(); + defaultNodeLabelExpression = q.getDefaultNodeLabelExpression(); resourcesUsed = new ResourceInfo(q.getUsedResources()); if (q instanceof PlanQueue && !((PlanQueue) q).showReservationsAsQueues()) { hideReservationQueues = true; @@ -271,7 +273,7 @@ public ResourceInfo getResourcesUsed() { static float cap(float val, float low, float hi) { return Math.min(Math.max(val, low), hi); } - + public ArrayList getNodeLabels() { return this.nodeLabels; } @@ -335,4 +337,8 @@ public 
float getWeight() { public float getNormalizedWeight() { return normalizedWeight; } + + public String getDefaultNodeLabelExpression() { + return defaultNodeLabelExpression; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java index a0a088497b717..ab3294191bf88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java @@ -661,6 +661,58 @@ public void testPerUserResourcesXML() throws Exception { } } + @Test + public void testNodeLabelDefaultAPI() throws Exception { + CapacitySchedulerConfiguration config = + ((CapacityScheduler)rm.getResourceScheduler()).getConfiguration(); + + config.setDefaultNodeLabelExpression("root", "ROOT-INHERITED"); + config.setDefaultNodeLabelExpression("root.a", "root-a-default-label"); + rm.getResourceScheduler().reinitialize(config, rm.getRMContext()); + + //Start RM so that it accepts app submissions + rm.start(); + try { + //Get the XML from ws/v1/cluster/scheduler + WebResource r = resource(); + ClientResponse response = r.path("ws/v1/cluster/scheduler") + .accept(MediaType.APPLICATION_XML).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_XML_TYPE + "; " + JettyUtils.UTF_8, + response.getType().toString()); + String xml = response.getEntity(String.class); + DocumentBuilder db = DocumentBuilderFactory.newInstance() + .newDocumentBuilder(); + InputSource is = new InputSource(); + is.setCharacterStream(new StringReader(xml)); + //Parse the XML we got + Document dom = db.parse(is); + + NodeList allQueues = dom.getElementsByTagName("queue"); + for (int i = 0; i < allQueues.getLength(); ++i) { + Node queueNode = allQueues.item(i); + Node queuePathNode = getChildNodeByName(queueNode, "queuePath"); + if (queuePathNode == null) { + continue; + } + + String queuePath = queuePathNode.getTextContent(); + if (queuePath != null) { + if (queuePath.startsWith("root.a")) { + assertEquals("root-a-default-label", + getChildNodeByName(queueNode, "defaultNodeLabelExpression") + .getTextContent()); + } else { + assertEquals("ROOT-INHERITED", + getChildNodeByName(queueNode, "defaultNodeLabelExpression") + .getTextContent()); + } + } + } + } finally { + rm.stop(); + } + } + private void checkResourcesUsed(JSONObject queue) throws JSONException { queue.getJSONObject("resourcesUsed").getInt("memory"); queue.getJSONObject("resourcesUsed").getInt("vCores"); From 4ff73d9e2927fcb530e1dc06b7d8c31f393860b2 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 23 Apr 2021 16:17:29 +0200 Subject: [PATCH 257/666] YARN-10750. TestMetricsInvariantChecker.testManyRuns is broken since HADOOP-17524. 
Contributed by Gergely Pollak --- .../src/test/resources/invariants.txt | 4 ---- .../src/test/resources/invariants_jdk9.txt | 4 ---- 2 files changed, 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants.txt index 363ed0d996e51..a77fdf086b118 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants.txt +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants.txt @@ -48,7 +48,3 @@ ThreadsBlocked >= 0 ThreadsWaiting >= 0 ThreadsTimedWaiting >= 0 ThreadsTerminated >= 0 -LogFatal >= 0 -LogError >= 0 -LogWarn >= 0 -LogInfo >= 0 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants_jdk9.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants_jdk9.txt index e081334739710..b40a7aa7116f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants_jdk9.txt +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/invariants_jdk9.txt @@ -48,7 +48,3 @@ ThreadsBlocked >= 0 ThreadsWaiting >= 0 ThreadsTimedWaiting >= 0 ThreadsTerminated >= 0 -LogFatal >= 0 -LogError >= 0 -LogWarn >= 0 -LogInfo >= 0 From 520aacafc54f02fac53ab95e2b833d7df9068e1d Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Fri, 23 Apr 2021 17:38:34 +0200 Subject: [PATCH 258/666] YARN-10705. Misleading DEBUG log for container assignment needs to be removed when the container is actually reserved, not assigned in FairScheduler. Contributed by Siddharth Ahuja. --- .../scheduler/fair/FSLeafQueue.java | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 5eafa522f72ad..af366ec55dc0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -352,9 +352,19 @@ public Resource assignContainer(FSSchedulerNode node) { continue; } assigned = sched.assignContainer(node); - if (!assigned.equals(none())) { - LOG.debug("Assigned container in queue:{} container:{}", - getName(), assigned); + + boolean isContainerAssignedOrReserved = !assigned.equals(none()); + boolean isContainerReserved = + assigned.equals(FairScheduler.CONTAINER_RESERVED); + + // check if an assignment or a reservation was made. + if (isContainerAssignedOrReserved) { + // only log container assignment if there was an actual allocation, + // not a reservation. 
+ if (!isContainerReserved && LOG.isDebugEnabled()) { + LOG.debug("Assigned container in queue:{} container:{}", + getName(), assigned); + } break; } } From 378aa3f69d0622dedebd86a2a3d682f9ad390f0b Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 23 Apr 2021 22:48:15 +0530 Subject: [PATCH 259/666] HADOOP-17620. DistCp: Use Iterator for listing target directory as well. (#2861). Contributed by Ayush Saxena. Signed-off-by: Vinayakumar B --- .../org/apache/hadoop/tools/SimpleCopyListing.java | 5 +++-- .../apache/hadoop/tools/mapred/CopyCommitter.java | 3 +++ .../tools/contract/AbstractContractDistCpTest.java | 13 ++++++++++--- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java index 900ce6296db08..fb7ace5f1ead5 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java @@ -65,7 +65,8 @@ * Note: The SimpleCopyListing doesn't handle wild-cards in the input-paths. */ public class SimpleCopyListing extends CopyListing { - private static final Logger LOG = LoggerFactory.getLogger(SimpleCopyListing.class); + public static final Logger LOG = + LoggerFactory.getLogger(SimpleCopyListing.class); public static final int DEFAULT_FILE_STATUS_SIZE = 1000; public static final boolean DEFAULT_RANDOMIZE_FILE_LISTING = true; @@ -601,7 +602,7 @@ public WorkReport processItem( } private void printStats() { - LOG.info("Paths (files+dirs) cnt = {}; dirCnt = ", totalPaths, totalDirs); + LOG.info("Paths (files+dirs) cnt = {}; dirCnt = {}", totalPaths, totalDirs); } private void maybePrintStats() { diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java index e346d0b938c93..33ab3ee4eba81 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java @@ -564,12 +564,15 @@ private Path listTargetFiles(final Configuration conf, // thread count is picked up from the job int threads = conf.getInt(DistCpConstants.CONF_LABEL_LISTSTATUS_THREADS, DistCpConstants.DEFAULT_LISTSTATUS_THREADS); + boolean useIterator = + conf.getBoolean(DistCpConstants.CONF_LABEL_USE_ITERATOR, false); LOG.info("Scanning destination directory {} with thread count: {}", targetFinalPath, threads); DistCpOptions options = new DistCpOptions.Builder(targets, resultNonePath) .withOverwrite(overwrite) .withSyncFolder(syncFolder) .withNumListstatusThreads(threads) + .withUseIterator(useIterator) .build(); DistCpContext distCpContext = new DistCpContext(options); distCpContext.setTargetPathExists(targetPathExists); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 202ead69a17bb..fab14d138b551 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -42,6 +42,7 @@ import org.apache.hadoop.tools.DistCp; 
import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.tools.SimpleCopyListing; import org.apache.hadoop.tools.mapred.CopyMapper; import org.apache.hadoop.tools.util.DistCpTestUtils; import org.apache.hadoop.util.functional.RemoteIterators; @@ -628,11 +629,17 @@ public void testDistCpWithIterator() throws Exception { GenericTestUtils .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth()); + GenericTestUtils.LogCapturer log = + GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG); + DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), - dest.toString(), "-useiterator", conf); + dest.toString(), "-useiterator -update -delete", conf); + + // Check the target listing was also done using iterator. + Assertions.assertThat(log.getOutput()).contains( + "Building listing using iterator mode for " + dest.toString()); - Assertions - .assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) + Assertions.assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) .describedAs("files").hasSize(getTotalFiles()); } From 97d5a812a41399578b2ae7b86c1cf48ec68333a8 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Fri, 23 Apr 2021 18:44:41 +0100 Subject: [PATCH 260/666] HADOOP-17597. Optionally downgrade on S3A Syncable calls (#2801) Followup to HADOOP-13327, which changed S3A output stream hsync/hflush calls to raise an exception. Adds a new option fs.s3a.downgrade.syncable.exceptions When true, calls to Syncable hsync/hflush on S3A output streams will log once at warn (for entire process life, not just the stream), then increment IOStats with the relevant operation counter With the downgrade option false (default) * IOStats are incremented * The UnsupportedOperationException current raised includes a link to the JIRA. Contributed by Steve Loughran. --- .../fs/statistics/StoreStatisticNames.java | 6 + .../org/apache/hadoop/fs/s3a/Constants.java | 22 +- .../hadoop/fs/s3a/S3ABlockOutputStream.java | 258 ++++++++++++++---- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 27 +- .../hadoop/fs/s3a/S3AInstrumentation.java | 14 +- .../org/apache/hadoop/fs/s3a/Statistic.java | 8 + .../hadoop/fs/s3a/WriteOperationHelper.java | 5 + .../apache/hadoop/fs/s3a/WriteOperations.java | 6 + .../BlockOutputStreamStatistics.java | 10 + .../impl/EmptyS3AStatisticsContext.java | 8 + .../tools/hadoop-aws/troubleshooting_s3a.md | 37 ++- .../hadoop/fs/s3a/ITestDowngradeSyncable.java | 114 ++++++++ .../fs/s3a/TestS3ABlockOutputStream.java | 55 +++- 13 files changed, 499 insertions(+), 71 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java index b6b08fe009e15..95144393585a1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java @@ -84,6 +84,12 @@ public final class StoreStatisticNames { /** {@value}. */ public static final String OP_IS_FILE = "op_is_file"; + /** {@value}. */ + public static final String OP_HFLUSH = "op_hflush"; + + /** {@value}. */ + public static final String OP_HSYNC = "op_hsync"; + /** {@value}. 
*/ public static final String OP_IS_DIRECTORY = "op_is_directory"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index c4b8f6e3c4683..f6900cb1c8e19 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -329,7 +329,6 @@ private Constants() { * Default is {@link #FAST_UPLOAD_BUFFER_DISK} * Value: {@value} */ - @InterfaceStability.Unstable public static final String FAST_UPLOAD_BUFFER = "fs.s3a.fast.upload.buffer"; @@ -338,26 +337,22 @@ private Constants() { * Capacity is limited to available disk space. */ - @InterfaceStability.Unstable public static final String FAST_UPLOAD_BUFFER_DISK = "disk"; /** * Use an in-memory array. Fast but will run of heap rapidly: {@value}. */ - @InterfaceStability.Unstable public static final String FAST_UPLOAD_BUFFER_ARRAY = "array"; /** * Use a byte buffer. May be more memory efficient than the * {@link #FAST_UPLOAD_BUFFER_ARRAY}: {@value}. */ - @InterfaceStability.Unstable public static final String FAST_UPLOAD_BYTEBUFFER = "bytebuffer"; /** * Default buffer option: {@value}. */ - @InterfaceStability.Unstable public static final String DEFAULT_FAST_UPLOAD_BUFFER = FAST_UPLOAD_BUFFER_DISK; @@ -370,7 +365,6 @@ private Constants() { *

    * Default is {@link #DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS} */ - @InterfaceStability.Unstable public static final String FAST_UPLOAD_ACTIVE_BLOCKS = "fs.s3a.fast.upload.active.blocks"; @@ -378,9 +372,23 @@ private Constants() { * Limit of queued block upload operations before writes * block. Value: {@value} */ - @InterfaceStability.Unstable public static final int DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS = 4; + /** + * Rather than raise an exception when an attempt is + * made to call the Syncable APIs, warn and downgrade. + * Value: {@value}. + */ + public static final String DOWNGRADE_SYNCABLE_EXCEPTIONS = + "fs.s3a.downgrade.syncable.exceptions"; + + /** + * Default value for syncable invocation. + * Value: {@value}. + */ + public static final boolean DOWNGRADE_SYNCABLE_EXCEPTIONS_DEFAULT = + false; + /** * The capacity of executor queues for operations other than block * upload, where {@link #FAST_UPLOAD_ACTIVE_BLOCKS} is used instead. diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java index 4f06981bc2d45..65b9535ba65ea 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java @@ -55,6 +55,7 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.s3a.commit.CommitConstants; import org.apache.hadoop.fs.s3a.commit.PutTracker; +import org.apache.hadoop.fs.s3a.impl.LogExactlyOnce; import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.fs.statistics.DurationTracker; import org.apache.hadoop.fs.statistics.IOStatistics; @@ -62,10 +63,10 @@ import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.util.Progressable; +import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.*; import static org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext.EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS; -import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.emptyStatistics; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfInvocation; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; @@ -89,10 +90,8 @@ class S3ABlockOutputStream extends OutputStream implements private static final Logger LOG = LoggerFactory.getLogger(S3ABlockOutputStream.class); - private static final String E_NOT_SYNCABLE = "S3A streams are not Syncable"; - - /** Owner FileSystem. */ - private final S3AFileSystem fs; + private static final String E_NOT_SYNCABLE = + "S3A streams are not Syncable. See HADOOP-17597."; /** Object being uploaded. */ private final String key; @@ -136,62 +135,48 @@ class S3ABlockOutputStream extends OutputStream implements /** * Write operation helper; encapsulation of the filesystem operations. */ - private final WriteOperationHelper writeOperationHelper; + private final WriteOperations writeOperationHelper; /** * Track multipart put operation. */ private final PutTracker putTracker; + /** Should Syncable calls be downgraded? */ + private final boolean downgradeSyncableExceptions; + + /** + * Downagraded syncable API calls are only logged at warn + * once across the entire process. 
+ */ + private static final LogExactlyOnce WARN_ON_SYNCABLE = + new LogExactlyOnce(LOG); + /** * An S3A output stream which uploads partitions in a separate pool of * threads; different {@link S3ADataBlocks.BlockFactory} * instances can control where data is buffered. - * - * @param fs S3AFilesystem - * @param key S3 object to work on. - * @param executorService the executor service to use to schedule work - * @param progress report progress in order to prevent timeouts. If - * this object implements {@code ProgressListener} then it will be - * directly wired up to the AWS client, so receive detailed progress - * information. - * @param blockSize size of a single block. - * @param blockFactory factory for creating stream destinations - * @param statistics stats for this stream - * @param writeOperationHelper state of the write operation. - * @param putTracker put tracking for commit support * @throws IOException on any problem */ - S3ABlockOutputStream(S3AFileSystem fs, - String key, - ExecutorService executorService, - Progressable progress, - long blockSize, - S3ADataBlocks.BlockFactory blockFactory, - BlockOutputStreamStatistics statistics, - WriteOperationHelper writeOperationHelper, - PutTracker putTracker) + S3ABlockOutputStream(BlockOutputStreamBuilder builder) throws IOException { - this.fs = fs; - this.key = key; - this.blockFactory = blockFactory; - this.blockSize = (int) blockSize; - this.statistics = statistics != null - ? statistics - : EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS; + builder.validate(); + this.key = builder.key; + this.blockFactory = builder.blockFactory; + this.blockSize = (int) builder.blockSize; + this.statistics = builder.statistics; // test instantiations may not provide statistics; - this.iostatistics = statistics != null - ? statistics.getIOStatistics() - : emptyStatistics(); - this.writeOperationHelper = writeOperationHelper; - this.putTracker = putTracker; - Preconditions.checkArgument(blockSize >= Constants.MULTIPART_MIN_SIZE, - "Block size is too small: %d", blockSize); - this.executorService = MoreExecutors.listeningDecorator(executorService); + this.iostatistics = statistics.getIOStatistics(); + this.writeOperationHelper = builder.writeOperations; + this.putTracker = builder.putTracker; + this.executorService = MoreExecutors.listeningDecorator( + builder.executorService); this.multiPartUpload = null; + final Progressable progress = builder.progress; this.progressListener = (progress instanceof ProgressListener) ? (ProgressListener) progress : new ProgressableListener(progress); + downgradeSyncableExceptions = builder.downgradeSyncableExceptions; // create that first block. This guarantees that an open + close sequence // writes a 0-byte entry. createBlockIfNeeded(); @@ -597,7 +582,7 @@ public String toString() { } private void incrementWriteOperations() { - fs.incrementWriteOperations(); + writeOperationHelper.incrementWriteOperations(); } /** @@ -654,12 +639,31 @@ public boolean hasCapability(String capability) { @Override public void hflush() throws IOException { - throw new UnsupportedOperationException(E_NOT_SYNCABLE); + statistics.hflushInvoked(); + handleSyncableInvocation(); } @Override public void hsync() throws IOException { - throw new UnsupportedOperationException(E_NOT_SYNCABLE); + statistics.hsyncInvoked(); + handleSyncableInvocation(); + } + + /** + * Shared processing of Syncable operation reporting/downgrade. 
+ */ + private void handleSyncableInvocation() { + final UnsupportedOperationException ex + = new UnsupportedOperationException(E_NOT_SYNCABLE); + if (!downgradeSyncableExceptions) { + throw ex; + } + // downgrading. + WARN_ON_SYNCABLE.warn("Application invoked the Syncable API against" + + " stream writing to {}. This is unsupported", + key); + // and log at debug + LOG.debug("Downgrading Syncable call", ex); } @Override @@ -982,4 +986,166 @@ public void progressChanged(ProgressEvent progressEvent) { } } + /** + * Create a builder. + * @return + */ + public static BlockOutputStreamBuilder builder() { + return new BlockOutputStreamBuilder(); + } + + /** + * Builder class for constructing an output stream. + */ + public static final class BlockOutputStreamBuilder { + + /** S3 object to work on. */ + private String key; + + /** The executor service to use to schedule work. */ + private ExecutorService executorService; + + /** + * Report progress in order to prevent timeouts. + * this object implements {@code ProgressListener} then it will be + * directly wired up to the AWS client, so receive detailed progress + * information. + */ + private Progressable progress; + + /** The size of a single block. */ + private long blockSize; + + /** The factory for creating stream destinations. */ + private S3ADataBlocks.BlockFactory blockFactory; + + /** The output statistics for the stream. */ + private BlockOutputStreamStatistics statistics = + EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS; + + /** Operations to write data. */ + private WriteOperations writeOperations; + + /** put tracking for commit support. */ + private PutTracker putTracker; + + /** Should Syncable calls be downgraded? */ + private boolean downgradeSyncableExceptions; + + private BlockOutputStreamBuilder() { + } + + /** + * Validate the arguments. + */ + public void validate() { + requireNonNull(key, "null key"); + requireNonNull(executorService, "null executorService"); + requireNonNull(blockFactory, "null blockFactory"); + requireNonNull(statistics, "null statistics"); + requireNonNull(writeOperations, "null writeOperationHelper"); + requireNonNull(putTracker, "null putTracker"); + Preconditions.checkArgument(blockSize >= Constants.MULTIPART_MIN_SIZE, + "Block size is too small: %s", blockSize); + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withKey( + final String value) { + key = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withExecutorService( + final ExecutorService value) { + executorService = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withProgress( + final Progressable value) { + progress = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withBlockSize( + final long value) { + blockSize = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withBlockFactory( + final S3ADataBlocks.BlockFactory value) { + blockFactory = value; + return this; + } + + /** + * Set builder value. 
+ * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withStatistics( + final BlockOutputStreamStatistics value) { + statistics = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withWriteOperations( + final WriteOperationHelper value) { + writeOperations = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withPutTracker( + final PutTracker value) { + putTracker = value; + return this; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withDowngradeSyncableExceptions( + final boolean value) { + downgradeSyncableExceptions = value; + return this; + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 8db5d51def84e..7f0160a5070f6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -1348,20 +1348,27 @@ public FSDataOutputStream create(Path f, FsPermission permission, String destKey = putTracker.getDestKey(); final BlockOutputStreamStatistics outputStreamStatistics = statisticsContext.newOutputStreamStatistics(); - return new FSDataOutputStream( - new S3ABlockOutputStream(this, - destKey, + final S3ABlockOutputStream.BlockOutputStreamBuilder builder = + S3ABlockOutputStream.builder() + .withKey(destKey) + .withBlockFactory(blockFactory) + .withBlockSize(partSize) + .withStatistics(outputStreamStatistics) + .withProgress(progress) + .withPutTracker(putTracker) + .withWriteOperations(getWriteOperationHelper()) + .withExecutorService( new SemaphoredDelegatingExecutor( boundedThreadPool, blockOutputActiveBlocks, true, - outputStreamStatistics), - progress, - partSize, - blockFactory, - outputStreamStatistics, - getWriteOperationHelper(), - putTracker), + outputStreamStatistics)) + .withDowngradeSyncableExceptions( + getConf().getBoolean( + DOWNGRADE_SYNCABLE_EXCEPTIONS, + DOWNGRADE_SYNCABLE_EXCEPTIONS_DEFAULT)); + return new FSDataOutputStream( + new S3ABlockOutputStream(builder), null); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java index dd28f3e59e037..169a74abef2d7 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java @@ -1346,7 +1346,9 @@ private OutputStreamStatistics( STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS.getSymbol(), STREAM_WRITE_QUEUE_DURATION.getSymbol(), STREAM_WRITE_TOTAL_DATA.getSymbol(), - STREAM_WRITE_TOTAL_TIME.getSymbol()) + STREAM_WRITE_TOTAL_TIME.getSymbol(), + INVOCATION_HFLUSH.getSymbol(), + INVOCATION_HSYNC.getSymbol()) .withGauges( STREAM_WRITE_BLOCK_UPLOADS_PENDING.getSymbol(), STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING.getSymbol()) @@ -1489,6 +1491,16 @@ public void commitUploaded(long size) { incrementCounter(COMMITTER_BYTES_UPLOADED, size); } + @Override + public void hflushInvoked() { + incCounter(INVOCATION_HFLUSH.getSymbol(), 1); + } + + @Override + public void hsyncInvoked() { + incCounter(INVOCATION_HSYNC.getSymbol(), 
1); + } + @Override public void close() { if (getBytesPendingUpload() > 0) { diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java index 1a53f0d1f8797..c613c06c9bb4e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java @@ -137,6 +137,14 @@ public enum Statistic { StoreStatisticNames.OP_IS_FILE, "Calls of isFile()", TYPE_COUNTER), + INVOCATION_HFLUSH( + StoreStatisticNames.OP_HFLUSH, + "Calls of hflush()", + TYPE_COUNTER), + INVOCATION_HSYNC( + StoreStatisticNames.OP_HSYNC, + "Calls of hsync()", + TYPE_COUNTER), INVOCATION_LIST_FILES( StoreStatisticNames.OP_LIST_FILES, "Calls of listFiles()", diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java index 9bdf61c22a1ff..8b71fc3277142 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java @@ -693,4 +693,9 @@ public SelectObjectContentResult select( } }); } + + @Override + public void incrementWriteOperations() { + owner.incrementWriteOperations(); + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java index 09b9cc924c6a4..0a8150ce34dbb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java @@ -338,4 +338,10 @@ SelectObjectContentResult select( SelectObjectContentRequest request, String action) throws IOException; + + /** + * Increment the write operation counter + * of the filesystem. + */ + void incrementWriteOperations(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java index b1cee718c206a..772b965d4f4a3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java @@ -134,4 +134,14 @@ public interface BlockOutputStreamStatistics extends Closeable, * @return the value or null if no matching gauge was found. */ Long lookupGaugeValue(String name); + + /** + * Syncable.hflush() has been invoked. + */ + void hflushInvoked(); + + /** + * Syncable.hsync() has been invoked. 
+ */ + void hsyncInvoked(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java index c8cd80592088b..3a651026a0ba6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java @@ -482,6 +482,14 @@ public Long lookupGaugeValue(final String name) { return 0L; } + @Override + public void hflushInvoked() { + } + + @Override + public void hsyncInvoked() { + } + @Override public void close() throws IOException { } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 416793b8ed957..661dd2f36ad2b 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -22,7 +22,7 @@ Common problems working with S3 are 1. Classpath setup 1. Authentication -1. S3 Inconsistency side-effects +1. Incorrect configuration Troubleshooting IAM Assumed Roles is covered in its @@ -1027,7 +1027,7 @@ at the end of a write operation. If a process terminated unexpectedly, or failed to call the `close()` method on an output stream, the pending data will have been lost. -### File `flush()`, `hsync` and `hflush()` calls do not save data to S3 +### File `flush()` calls do not save data to S3 Again, this is due to the fact that the data is cached locally until the `close()` operation. The S3A filesystem cannot be used as a store of data @@ -1036,6 +1036,39 @@ if it is required that the data is persisted durably after every This includes resilient logging, HBase-style journaling and the like. The standard strategy here is to save to HDFS and then copy to S3. +### `UnsupportedOperationException` "S3A streams are not Syncable. See HADOOP-17597." + +The application has tried to call either the `Syncable.hsync()` or `Syncable.hflush()` +methods on an S3A output stream. This has been rejected because the +connector isn't saving any data at all. The `Syncable` API, especially the +`hsync()` call, are critical for applications such as HBase to safely +persist data. + +The S3A connector throws an `UnsupportedOperationException` when these API calls +are made, because the guarantees absolutely cannot be met: nothing is being flushed +or saved. + +* Applications which intend to invoke the Syncable APIs call `hasCapability("hsync")` on + the stream to see if they are supported. +* Or catch and downgrade `UnsupportedOperationException`. + +These recommendations _apply to all filesystems_. + +To downgrade the S3A connector to simply warning of the use of +`hsync()` or `hflush()` calls, set the option +`fs.s3a.downgrade.syncable.exceptions` to true. + +```xml + + fs.s3a.downgrade.syncable.exceptions + true + +``` + +The count of invocations of the two APIs are collected +in the S3A filesystem Statistics/IOStatistics and so +their use can be monitored. 
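A minimal sketch of the capability-probing pattern recommended above might look like the following; the class and method names are purely illustrative (they are not part of the S3A code), and the `FileSystem`, `Path` and payload are assumed to come from the application:

```java
// Illustrative only: probe the stream for Syncable support instead of
// assuming it. "hsync" is the standard StreamCapabilities probe string.
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class SyncAwareWriter {

  private SyncAwareWriter() {
  }

  /** Write data, syncing only where the stream declares "hsync" support. */
  public static void write(FileSystem fs, Path path, byte[] data)
      throws IOException {
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(data);
      if (out.hasCapability("hsync")) {
        out.hsync();   // durable on stores which really implement Syncable
      } else {
        out.flush();   // best effort; S3A data only becomes visible on close()
      }
    }
  }
}
```

On a filesystem such as HDFS the `hsync()` branch runs and the data is durable; on S3A the fallback branch runs instead of the call failing.
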
+ ### `RemoteFileChangedException` and read-during-overwrite ``` diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java new file mode 100644 index 0000000000000..0bcb11a823d3d --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest; +import org.apache.hadoop.fs.statistics.IOStatistics; + +import static org.apache.hadoop.fs.s3a.Constants.DOWNGRADE_SYNCABLE_EXCEPTIONS; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBucketOverrides; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OP_HFLUSH; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OP_HSYNC; + + +public class ITestDowngradeSyncable extends AbstractS3ACostTest { + + protected static final Logger LOG = + LoggerFactory.getLogger(ITestDowngradeSyncable.class); + + + public ITestDowngradeSyncable() { + super(false, true, false); + } + + @Override + public Configuration createConfiguration() { + final Configuration conf = super.createConfiguration(); + String bucketName = getTestBucketName(conf); + removeBucketOverrides(bucketName, conf, + DOWNGRADE_SYNCABLE_EXCEPTIONS); + conf.setBoolean(DOWNGRADE_SYNCABLE_EXCEPTIONS, true); + return conf; + } + + @Test + public void testHFlushDowngrade() throws Throwable { + describe("Verify that hflush() calls can be downgraded from fail" + + " to ignore; the relevant counter is updated"); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + final IOStatistics fsIoStats = fs.getIOStatistics(); + assertThatStatisticCounter(fsIoStats, OP_HFLUSH) + .isEqualTo(0); + + try (FSDataOutputStream out = fs.create(path, true)) { + out.write('1'); + // must succeed + out.hflush(); + // stats counter records the downgrade + IOStatistics iostats = out.getIOStatistics(); + LOG.info("IOStats {}", ioStatisticsToString(iostats)); + assertThatStatisticCounter(iostats, OP_HFLUSH) + .isEqualTo(1); + assertThatStatisticCounter(iostats, OP_HSYNC) + .isEqualTo(0); + } + // once closed. 
the FS will have its stats merged. + assertThatStatisticCounter(fsIoStats, OP_HFLUSH) + .isEqualTo(1); + } + + @Test + public void testHSyncDowngrade() throws Throwable { + describe("Verify that hsync() calls can be downgraded from fail" + + " to ignore; the relevant counter is updated"); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + final IOStatistics fsIoStats = fs.getIOStatistics(); + assertThatStatisticCounter(fsIoStats, OP_HSYNC) + .isEqualTo(0); + + try (FSDataOutputStream out = fs.create(path, true)) { + out.write('1'); + // must succeed + out.hsync(); + // stats counter records the downgrade + IOStatistics iostats = out.getIOStatistics(); + LOG.info("IOStats {}", ioStatisticsToString(iostats)); + assertThatStatisticCounter(iostats, OP_HFLUSH) + .isEqualTo(0); + assertThatStatisticCounter(iostats, OP_HSYNC) + .isEqualTo(1); + } + // once closed. the FS will have its stats merged. + assertThatStatisticCounter(fsIoStats, OP_HSYNC) + .isEqualTo(1); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java index baa4a542c855a..de27411a41ab8 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java @@ -43,8 +43,11 @@ public class TestS3ABlockOutputStream extends AbstractS3AMockTest { private S3ABlockOutputStream stream; - @Before - public void setUp() throws Exception { + /** + * Create an S3A Builder all mocked up from component pieces. + * @return stream builder. + */ + private S3ABlockOutputStream.BlockOutputStreamBuilder mockS3ABuilder() { ExecutorService executorService = mock(ExecutorService.class); Progressable progressable = mock(Progressable.class); S3ADataBlocks.BlockFactory blockFactory = @@ -52,11 +55,26 @@ public void setUp() throws Exception { long blockSize = Constants.DEFAULT_MULTIPART_SIZE; WriteOperationHelper oHelper = mock(WriteOperationHelper.class); PutTracker putTracker = mock(PutTracker.class); - stream = spy(new S3ABlockOutputStream(fs, "", executorService, - progressable, blockSize, blockFactory, null, oHelper, - putTracker)); + final S3ABlockOutputStream.BlockOutputStreamBuilder builder = + S3ABlockOutputStream.builder() + .withBlockFactory(blockFactory) + .withBlockSize(blockSize) + .withExecutorService(executorService) + .withKey("") + .withProgress(progressable) + .withPutTracker(putTracker) + .withWriteOperations(oHelper); + return builder; + } + + @Before + public void setUp() throws Exception { + final S3ABlockOutputStream.BlockOutputStreamBuilder + builder = mockS3ABuilder(); + stream = spy(new S3ABlockOutputStream(builder)); } + @Test public void testFlushNoOpWhenStreamClosed() throws Exception { doThrow(new IOException()).when(stream).checkOpen(); @@ -108,4 +126,31 @@ public void testCallingCloseAfterCallingAbort() throws Exception { // This will ensure abort() can be called with try-with-resource. stream.close(); } + + + /** + * Unless configured to downgrade, the stream will raise exceptions on + * Syncable API calls. + */ + @Test + public void testSyncableUnsupported() throws Exception { + intercept(UnsupportedOperationException.class, () -> stream.hflush()); + intercept(UnsupportedOperationException.class, () -> stream.hsync()); + } + + /** + * When configured to downgrade, the stream downgrades on + * Syncable API calls. 
+ */ + @Test + public void testSyncableDowngrade() throws Exception { + final S3ABlockOutputStream.BlockOutputStreamBuilder + builder = mockS3ABuilder(); + builder.withDowngradeSyncableExceptions(true); + stream = spy(new S3ABlockOutputStream(builder)); + + stream.hflush(); + stream.hsync(); + } + } From dc2bb8a66c6454f10e68decd1f3eebcc951f8c8d Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 23 Apr 2021 22:48:33 +0200 Subject: [PATCH 261/666] YARN-10123. Error message around yarn app -stop/start can be improved. Contributed by Siddharth Ahuja This is required to highlight that an implementation at framework level is needed for the stop/start functionality to work. --- .../hadoop/yarn/client/api/AppAdminClient.java | 4 +++- .../src/site/markdown/YarnCommands.md | 18 +++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java index 2fd59a863c87b..b9f72484a5d0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java @@ -74,7 +74,9 @@ public static AppAdminClient createAppAdminClient(String appType, } if (!clientClassMap.containsKey(appType)) { throw new IllegalArgumentException("App admin client class name not " + - "specified for type " + appType); + "specified for type " + appType + ". Please ensure " + appType + + " framework has provided a client implementation to handle" + + " this functionality."); } String clientClassName = clientClassMap.get(appType); Class clientClass; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md index 3082ff81bcb15..123ff6b7c185a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md @@ -51,21 +51,21 @@ Usage: `yarn app [options] ` | -changeQueue \ | Moves application to a new queue. ApplicationId can be passed using 'appId' option. 'movetoqueue' command is deprecated, this new command 'changeQueue' performs same functionality. | | -component \ \ | Works with -flex option to change the number of components/containers running for an application / long-running service. Supports absolute or relative changes, such as +1, 2, or -3. | | -components \ | Works with -upgrade option to trigger the upgrade of specified components of the application. Multiple components should be separated by commas. | -| -decommission \ | Decommissions component instances for an application / long-running service. Requires -instances option. Supports -appTypes option to specify which client implementation to use. | -| -destroy \ | Destroys a saved application specification and removes all application data permanently. Supports -appTypes option to specify which client implementation to use. | -| -enableFastLaunch | Uploads AM dependencies to HDFS to make future launches faster. Supports -appTypes option to specify which client implementation to use. | -| -flex \ | Changes number of running containers for a component of an application / long-running service. 
Requires -component option. If name is provided, appType must be provided unless it is the default yarn-service. If ID is provided, the appType will be looked up. Supports -appTypes option to specify which client implementation to use. | +| -decommission \ | Decommissions component instances for an application / long-running service. Requires -instances option. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | +| -destroy \ | Destroys a saved application specification and removes all application data permanently. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | +| -enableFastLaunch | Uploads AM dependencies to HDFS to make future launches faster. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | +| -flex \ | Changes number of running containers for a component of an application / long-running service. Requires -component option. If name is provided, appType must be provided unless it is the default yarn-service. If ID is provided, the appType will be looked up. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | | -help | Displays help for all commands. | | -instances \ | Works with -upgrade option to trigger the upgrade of specified component instances of the application. Also works with -decommission option to decommission specified component instances. Multiple instances should be separated by commas. | | -kill \ | Kills the application. Set of applications can be provided separated with space | -| -launch \ \ | Launches application from specification file (saves specification and starts application). Options -updateLifetime and -changeQueue can be specified to alter the values provided in the file. Supports -appTypes option to specify which client implementation to use. | +| -launch \ \ | Launches application from specification file (saves specification and starts application). Options -updateLifetime and -changeQueue can be specified to alter the values provided in the file. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | | -list | List applications. Supports optional use of -appTypes to filter applications based on application type, -appStates to filter applications based on application state and -appTags to filter applications based on application tag. | | -movetoqueue \ | Moves the application to a different queue. Deprecated command. Use 'changeQueue' instead. | | -queue \ | Works with the movetoqueue command to specify which queue to move an application to. | -| -save \ \ | Saves specification file for an application. Options -updateLifetime and -changeQueue can be specified to alter the values provided in the file. Supports -appTypes option to specify which client implementation to use. | -| -start \ | Starts a previously saved application. 
Supports -appTypes option to specify which client implementation to use. | -| -status \ | Prints the status of the application. If app ID is provided, it prints the generic YARN application status. If name is provided, it prints the application specific status based on app's own implementation, and -appTypes option must be specified unless it is the default `yarn-service` type.| -| -stop \ | Stops application gracefully (may be started again later). If name is provided, appType must be provided unless it is the default yarn-service. If ID is provided, the appType will be looked up. Supports -appTypes option to specify which client implementation to use. | +| -save \ \ | Saves specification file for an application. Options -updateLifetime and -changeQueue can be specified to alter the values provided in the file. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | +| -start \ | Starts a previously saved application. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | +| -status \ | Prints the status of the application. If app ID is provided, it prints the generic YARN application status. If name is provided, it prints the application specific status based on app's own implementation, and -appTypes option must be specified unless it is the default `yarn-service` type. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality.| +| -stop \ | Stops application gracefully (may be started again later). If name is provided, appType must be provided unless it is the default yarn-service. If ID is provided, the appType will be looked up. Supports -appTypes option to specify which client implementation to use. Please ensure the framework corresponding to appType has provided the appropriate client implementation to handle this specific functionality. | | -updateLifetime \ | Update timeout of an application from NOW. ApplicationId can be passed using 'appId' option. Timeout value is in seconds. | | -updatePriority \ | Update priority of an application. ApplicationId can be passed using 'appId' option. | From cfb85034baf677435c35fe7090a85c4a619a2067 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Fri, 23 Apr 2021 21:35:09 +0000 Subject: [PATCH 262/666] YARN-10743. Add a policy for not aggregating for containers which are killed because exceeding container log size limit. Contributed by Qi Zhu. 
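As with the existing policies, the new class is picked up through the usual policy configuration; a minimal sketch, assuming the cluster-wide default is being changed (the helper class name below is illustrative, and the key is the one cited in the LogAggregationContext javadoc in this change):

import org.apache.hadoop.conf.Configuration;

// Illustrative helper, not part of this patch: select the new policy as the
// NodeManager-wide default. The key name is quoted from the
// LogAggregationContext javadoc; the class name matches the file added below.
public final class LimitSizePolicyConfig {
  private LimitSizePolicyConfig() {
  }

  public static Configuration withLimitSizePolicy(Configuration conf) {
    conf.set("yarn.nodemanager.log-aggregation.policy.class",
        "org.apache.hadoop.yarn.server.nodemanager.containermanager."
            + "logaggregation.LimitSizeContainerLogAggregationPolicy");
    return conf;
  }
}

As the updated LogAggregationContext javadoc below notes, applications can still specify a policy per submission instead of relying on the cluster default.
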
--- .../api/records/LogAggregationContext.java | 2 ++ ...imitSizeContainerLogAggregationPolicy.java | 32 +++++++++++++++++++ .../TestLogAggregationService.java | 30 +++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LimitSizeContainerLogAggregationPolicy.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java index 5ac7d2dd5e8ae..e58012d68b850 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java @@ -76,6 +76,8 @@ * SampleContainerLogAggregationPolicy: sample logs of successful worker * containers, in addition to application master and failed/killed * containers. + * LimitSizeContainerLogAggregationPolicy: skip aggregation for killed + * containers whose log size exceeds the limit of container log size. * If it isn't specified, it will use the cluster-wide default policy * defined by configuration yarn.nodemanager.log-aggregation.policy.class. * The default value of yarn.nodemanager.log-aggregation.policy.class is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LimitSizeContainerLogAggregationPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LimitSizeContainerLogAggregationPolicy.java new file mode 100644 index 0000000000000..bda175f48a83c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LimitSizeContainerLogAggregationPolicy.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; +import org.apache.hadoop.yarn.server.api.ContainerLogContext; + +@Private +public class LimitSizeContainerLogAggregationPolicy extends + AbstractContainerLogAggregationPolicy { + public boolean shouldDoLogAggregation(ContainerLogContext logContext) { + return logContext.getExitCode() + != ContainerExitStatus.KILLED_FOR_EXCESS_LOGS; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index e5ecb724f8c7f..4cc9ac1f3a7a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -99,6 +99,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.Event; @@ -1868,6 +1869,35 @@ public void testFailedContainerPolicy() throws Exception { verifyLogAggFinishEvent(appId); } + @Test (timeout = 50000) + @SuppressWarnings("unchecked") + public void testLimitSizeContainerLogAggregationPolicy() throws Exception { + ApplicationId appId = createApplication(); + LogAggregationService logAggregationService = createLogAggregationService( + appId, LimitSizeContainerLogAggregationPolicy.class, null); + + String[] logFiles = new String[] {"stdout" }; + // exitCode KILLED_FOR_EXCESS_LOGS + finishContainer( + appId, logAggregationService, ContainerType.APPLICATION_MASTER, 1, + ContainerExitStatus.KILLED_FOR_EXCESS_LOGS, + logFiles); + ContainerId container2 = + finishContainer(appId, logAggregationService, ContainerType.TASK, 2, 0, + logFiles); + ContainerId container3 = + finishContainer(appId, logAggregationService, ContainerType.TASK, 3, + ContainerExecutor.ExitCode.FORCE_KILLED.getExitCode(), logFiles); + + finishApplication(appId, logAggregationService); + + verifyContainerLogs(logAggregationService, appId, + new ContainerId[] {container2, container3}, + logFiles, 2, false, EMPTY_FILES); + + verifyLogAggFinishEvent(appId); + } + @Test (timeout = 50000) @SuppressWarnings("unchecked") public void testAMOrFailedContainerPolicy() throws Exception { From 0876b75fabb34b630805a84b8ce3da31bca474c5 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Fri, 23 Apr 2021 22:02:04 +0000 Subject: [PATCH 263/666] YARN-10479. Can't remove all node labels after add node label without nodemanager port, broken by YARN-10647. 
Contributed by D M Murali Krishna Reddy --- .../yarn/nodelabels/CommonNodeLabelsManager.java | 15 ++++++++++++--- .../nodelabels/RMNodeLabelsManager.java | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index c19faaaad3fcd..7dadaeb379047 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -95,6 +95,8 @@ public class CommonNodeLabelsManager extends AbstractService { new ConcurrentHashMap(); protected ConcurrentMap nodeCollections = new ConcurrentHashMap(); + private ConcurrentMap isNodeLabelFromHost = + new ConcurrentHashMap(); protected RMNodeLabel noNodeLabel; @@ -603,6 +605,11 @@ private void replaceLabelsForNode(NodeId node, Set oldLabels, addLabelsToNodeInHost(node, newLabels); } + protected boolean isNodeLabelExplicit(NodeId nodeId) { + return !isNodeLabelFromHost.containsKey(nodeId) || + isNodeLabelFromHost.get(nodeId); + } + @SuppressWarnings("unchecked") protected void internalUpdateLabelsOnNodes( Map> nodeToLabels, NodeLabelUpdateOperation op) @@ -637,6 +644,7 @@ protected void internalUpdateLabelsOnNodes( node.labels.addAll(labels); } addNodeToLabels(node.nodeId, labels); + isNodeLabelFromHost.put(node.nodeId, true); } break; case REPLACE: @@ -646,10 +654,9 @@ protected void internalUpdateLabelsOnNodes( host.labels.addAll(labels); for (Node node : host.nms.values()) { replaceNodeForLabels(node.nodeId, node.labels, labels); - if (node.labels != null) { - replaceLabelsForNode(node.nodeId, node.labels, labels); - } + replaceLabelsForNode(node.nodeId, node.labels, labels); node.labels = null; + isNodeLabelFromHost.put(node.nodeId, true); } break; default: @@ -669,6 +676,7 @@ protected void internalUpdateLabelsOnNodes( nm.labels = new HashSet(); } nm.labels.addAll(labels); + isNodeLabelFromHost.put(nm.nodeId, false); break; case REPLACE: oldLabels = getLabelsByNode(nodeId); @@ -679,6 +687,7 @@ protected void internalUpdateLabelsOnNodes( } nm.labels.clear(); nm.labels.addAll(labels); + isNodeLabelFromHost.put(nm.nodeId, false); break; default: break; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java index 1a08e03e699cc..57f5803f9b457 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java @@ -275,7 +275,7 @@ public void deactivateNode(NodeId nodeId) { Map before = cloneNodeMap(ImmutableSet.of(nodeId)); Node nm = getNMInNodeSet(nodeId); if (null != nm) { - if (null == nm.labels) { + if (isNodeLabelExplicit(nm.nodeId)) { // When node deactivated, 
remove the nm from node collection if no // labels explicitly set for this particular nm From 1748de94d60cbfae9adbec462d84591f148f140c Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Sun, 25 Apr 2021 14:31:51 +0800 Subject: [PATCH 264/666] HDFS-15978. Solve DatanodeManager#getBlockRecoveryCommand() printing IOException. (#2913) Contributed by JiangHua Zhu. --- .../hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 6939e65ba80a1..8e5a24ba60a26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1658,7 +1658,7 @@ private BlockRecoveryCommand getBlockRecoveryCommand(String blockPoolId, BlockUnderConstructionFeature uc = b.getUnderConstructionFeature(); if(uc == null) { throw new IOException("Recovery block " + b + - "where it is not under construction."); + " where it is not under construction."); } final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); // Skip stale nodes during recovery From 87c3363def6cb6e9d59ecdd16169da557dfe342a Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Sun, 25 Apr 2021 18:33:55 +0100 Subject: [PATCH 265/666] HADOOP-17112. S3A committers can't handle whitespace in paths. (#2953) Contributed by Krzysztof Adamski. --- .../java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java | 2 +- .../org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java index c09278fbdcf32..cd8257f97fb91 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java @@ -485,7 +485,7 @@ public SinglePendingCommit uploadFileToPendingCommit(File localFile, if (!localFile.isFile()) { throw new FileNotFoundException("Not a file: " + localFile); } - String destURI = destPath.toString(); + String destURI = destPath.toUri().toString(); String destKey = fs.pathToKey(destPath); String uploadId = null; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index 4d7f81d019b74..ab2f9a2d316ee 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -673,7 +673,7 @@ public void testBulkCommitFiles() throws Throwable { Path subdir = new Path(destDir, "subdir"); // file 2 Path destFile2 = new Path(subdir, "file2"); - Path destFile3 = new Path(subdir, "file3"); + Path destFile3 = new Path(subdir, "file3 with space"); List destinations = Lists.newArrayList(destFile1, destFile2, destFile3); List commits = new ArrayList<>(3); From 2443a959cd7f26927be38b6b855fa5150c7e7959 Mon Sep 
17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 26 Apr 2021 11:27:15 +0800 Subject: [PATCH 266/666] Revert "HDFS-15624. fix the function of setting quota by storage type (#2377)" This reverts commit 394b9f7a5c42ffa38bf7c3a44e7d50a4d19414f9. Ref: HDFS-15995. Had to revert this commit, so we can commit HDFS-15566 (a critical bug preventing rolling upgrade to Hadoop 3.3) Will re-work this fix again later. --- .../java/org/apache/hadoop/fs/StorageType.java | 7 ++++--- .../java/org/apache/hadoop/fs/shell/TestCount.java | 4 ++-- .../server/federation/router/TestRouterQuota.java | 14 +++++++------- .../hadoop/hdfs/server/namenode/FSNamesystem.java | 7 ------- .../server/namenode/NameNodeLayoutVersion.java | 3 +-- .../apache/hadoop/hdfs/TestBlockStoragePolicy.java | 6 +++--- .../hadoop/hdfs/protocol/TestLayoutVersion.java | 3 +-- 7 files changed, 18 insertions(+), 26 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java index 2b5e4f1c5cee2..b17864a22282a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java @@ -34,12 +34,13 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public enum StorageType { + // sorted by the speed of the storage types, from fast to slow RAM_DISK(true, true), + NVDIMM(false, true), SSD(false, false), DISK(false, false), ARCHIVE(false, false), - PROVIDED(false, false), - NVDIMM(false, true); + PROVIDED(false, false); private final boolean isTransient; private final boolean isRAM; @@ -121,4 +122,4 @@ public static String getConf(Configuration conf, StorageType t, String name) { return conf.get(CONF_KEY_HEADER + t.toString() + "." 
+ name); } -} +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java index 19516b8c40799..618cb0190a99a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java @@ -283,10 +283,10 @@ public void processPathWithQuotasByStorageTypesHeader() throws Exception { count.processOptions(options); String withStorageTypeHeader = // <----13---> <-------17------> <----13-----> <------17-------> + " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + " SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " + // <----13---> <-------17------> "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " + - " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); @@ -338,11 +338,11 @@ public void processPathWithQuotasByQTVH() throws Exception { count.processOptions(options); String withStorageTypeHeader = // <----13---> <-------17------> + " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + " SSD_QUOTA REM_SSD_QUOTA " + " DISK_QUOTA REM_DISK_QUOTA " + "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " + "PROVIDED_QUOTA REM_PROVIDED_QUOTA " + - " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index b69004198eb48..551ae8a8e0612 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -414,13 +414,13 @@ public void testStorageTypeQuota() throws Exception { QuotaUsage usage = client.getQuotaUsage("/type0"); assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1}, - null, usage); + verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1}, null, + usage); // Verify /type1 quota on NN1. usage = client.getQuotaUsage("/type1"); assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1}, null, + verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1}, null, usage); FileSystem routerFs = routerContext.getFileSystem(); @@ -431,15 +431,15 @@ public void testStorageTypeQuota() throws Exception { assertEquals(2, u1.getFileAndDirectoryCount()); assertEquals(HdfsConstants.QUOTA_RESET, u1.getSpaceQuota()); assertEquals(fileSize * 3, u1.getSpaceConsumed()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1}, - new long[] {0, 0, fileSize * 3, 0, 0, 0}, u1); + verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1}, + new long[] {0, 0, 0, fileSize * 3, 0, 0}, u1); // Verify /type0 storage type quota usage on Router. 
assertEquals(HdfsConstants.QUOTA_RESET, u0.getQuota()); assertEquals(4, u0.getFileAndDirectoryCount()); assertEquals(HdfsConstants.QUOTA_RESET, u0.getSpaceQuota()); assertEquals(fileSize * 3 * 2, u0.getSpaceConsumed()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1}, - new long[] {0, 0, fileSize * 3 * 2, 0, 0, 0}, u0); + verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1}, + new long[] {0, 0, 0, fileSize * 3 * 2, 0, 0}, u0); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e559515696d66..926c4e6482414 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2429,10 +2429,6 @@ private void checkStoragePolicyEnabled(final String operationNameReadable, * @throws IOException */ void setStoragePolicy(String src, String policyName) throws IOException { - if (policyName.equalsIgnoreCase( - HdfsConstants.ALLNVDIMM_STORAGE_POLICY_NAME)) { - requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT); - } final String operationName = "setStoragePolicy"; checkOperation(OperationCategory.WRITE); checkStoragePolicyEnabled("set storage policy", true); @@ -3575,9 +3571,6 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) if (type != null) { requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE); } - if (type == StorageType.NVDIMM) { - requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT); - } checkOperation(OperationCategory.WRITE); final String operationName = getQuotaCommand(nsQuota, ssQuota); final FSPermissionChecker pc = getPermissionChecker(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java index b2477466be9f1..297ca74c5e111 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java @@ -89,8 +89,7 @@ public enum Feature implements LayoutFeature { APPEND_NEW_BLOCK(-62, -61, "Support appending to new block"), QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types"), ERASURE_CODING(-64, -61, "Support erasure coding"), - EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"), - NVDIMM_SUPPORT(-66, -61, "Support NVDIMM storage type"); + EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index ef116957fb84d..efb4b4df358f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -1421,29 +1421,29 @@ public void testStorageType() { final EnumMap map = new EnumMap<>(StorageType.class); //put storage type is reversed order - 
map.put(StorageType.NVDIMM, 1); map.put(StorageType.ARCHIVE, 1); map.put(StorageType.DISK, 1); map.put(StorageType.SSD, 1); map.put(StorageType.RAM_DISK, 1); + map.put(StorageType.NVDIMM, 1); { final Iterator i = map.keySet().iterator(); Assert.assertEquals(StorageType.RAM_DISK, i.next()); + Assert.assertEquals(StorageType.NVDIMM, i.next()); Assert.assertEquals(StorageType.SSD, i.next()); Assert.assertEquals(StorageType.DISK, i.next()); Assert.assertEquals(StorageType.ARCHIVE, i.next()); - Assert.assertEquals(StorageType.NVDIMM, i.next()); } { final Iterator> i = map.entrySet().iterator(); Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey()); + Assert.assertEquals(StorageType.NVDIMM, i.next().getKey()); Assert.assertEquals(StorageType.SSD, i.next().getKey()); Assert.assertEquals(StorageType.DISK, i.next().getKey()); Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey()); - Assert.assertEquals(StorageType.NVDIMM, i.next().getKey()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java index 316480961a9ee..2c9905d6fca4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java @@ -128,8 +128,7 @@ public void testNameNodeFeatureMinimumCompatibleLayoutVersions() { NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK, NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE, NameNodeLayoutVersion.Feature.ERASURE_CODING, - NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE, - NameNodeLayoutVersion.Feature.NVDIMM_SUPPORT); + NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE); for (LayoutFeature f : compatibleFeatures) { assertEquals(String.format("Expected minimum compatible layout version " + "%d for feature %s.", baseLV, f), baseLV, From 22d75d6ab9b947a95eea73698f8ffa97274883ba Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Mon, 26 Apr 2021 11:29:41 +0800 Subject: [PATCH 267/666] HDFS-15566. NN restart fails after RollingUpgrade from 3.1.3/3.2.1 to 3.3.0. Contributed by Brahma Reddy Battula. 
Signed-off-by: Wei-Chiu Chuang --- .../hdfs/server/namenode/FSEditLogOp.java | 58 +++++++++++++++---- .../namenode/NameNodeLayoutVersion.java | 3 +- .../hdfs/protocol/TestLayoutVersion.java | 3 +- 3 files changed, 52 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index feff8b48f7e21..69c95b0521e01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -3477,17 +3477,30 @@ CreateSnapshotOp setSnapshotMTime(long mTime) { void readFields(DataInputStream in, int logVersion) throws IOException { snapshotRoot = FSImageSerialization.readString(in); snapshotName = FSImageSerialization.readString(in); - mtime = FSImageSerialization.readLong(in); - + if (NameNodeLayoutVersion + .supports(NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + logVersion)) { + mtime = FSImageSerialization.readLong(in); + } // read RPC ids if necessary readRpcIds(in, logVersion); } @Override public void writeFields(DataOutputStream out) throws IOException { + throw new IOException("Unsupported without logversion"); + } + + @Override + public void writeFields(DataOutputStream out, int logVersion) + throws IOException { FSImageSerialization.writeString(snapshotRoot, out); FSImageSerialization.writeString(snapshotName, out); - FSImageSerialization.writeLong(mtime, out); + if (NameNodeLayoutVersion + .supports(NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + logVersion)) { + FSImageSerialization.writeLong(mtime, out); + } writeRpcIds(rpcClientId, rpcCallId, out); } @@ -3569,17 +3582,30 @@ DeleteSnapshotOp setSnapshotMTime(long mTime) { void readFields(DataInputStream in, int logVersion) throws IOException { snapshotRoot = FSImageSerialization.readString(in); snapshotName = FSImageSerialization.readString(in); - mtime = FSImageSerialization.readLong(in); - + if (NameNodeLayoutVersion + .supports(NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + logVersion)) { + mtime = FSImageSerialization.readLong(in); + } // read RPC ids if necessary readRpcIds(in, logVersion); } @Override public void writeFields(DataOutputStream out) throws IOException { + throw new IOException("Unsupported without logversion"); + } + + @Override + public void writeFields(DataOutputStream out, int logVersion) + throws IOException { FSImageSerialization.writeString(snapshotRoot, out); FSImageSerialization.writeString(snapshotName, out); - FSImageSerialization.writeLong(mtime, out); + if (NameNodeLayoutVersion + .supports(NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + logVersion)) { + FSImageSerialization.writeLong(mtime, out); + } writeRpcIds(rpcClientId, rpcCallId, out); } @@ -3670,19 +3696,31 @@ void readFields(DataInputStream in, int logVersion) throws IOException { snapshotRoot = FSImageSerialization.readString(in); snapshotOldName = FSImageSerialization.readString(in); snapshotNewName = FSImageSerialization.readString(in); - mtime = FSImageSerialization.readLong(in); - + if (NameNodeLayoutVersion + .supports(NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + logVersion)) { + mtime = FSImageSerialization.readLong(in); + } // read RPC ids if necessary readRpcIds(in, logVersion); } @Override public void writeFields(DataOutputStream out) 
throws IOException { + throw new IOException("Unsupported without logversion"); + } + + @Override + public void writeFields(DataOutputStream out, int logVersion) + throws IOException { FSImageSerialization.writeString(snapshotRoot, out); FSImageSerialization.writeString(snapshotOldName, out); FSImageSerialization.writeString(snapshotNewName, out); - FSImageSerialization.writeLong(mtime, out); - + if (NameNodeLayoutVersion + .supports(NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + logVersion)) { + FSImageSerialization.writeLong(mtime, out); + } writeRpcIds(rpcClientId, rpcCallId, out); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java index 297ca74c5e111..bcb3714268aa2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java @@ -89,7 +89,8 @@ public enum Feature implements LayoutFeature { APPEND_NEW_BLOCK(-62, -61, "Support appending to new block"), QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types"), ERASURE_CODING(-64, -61, "Support erasure coding"), - EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"); + EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"), + SNAPSHOT_MODIFICATION_TIME(-66, -61, "Support modification time for snapshot"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java index 2c9905d6fca4b..96629425f8d55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java @@ -128,7 +128,8 @@ public void testNameNodeFeatureMinimumCompatibleLayoutVersions() { NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK, NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE, NameNodeLayoutVersion.Feature.ERASURE_CODING, - NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE); + NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE, + NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME); for (LayoutFeature f : compatibleFeatures) { assertEquals(String.format("Expected minimum compatible layout version " + "%d for feature %s.", baseLV, f), baseLV, From 94ee2068b28856a1814e8bfa4adb68491c9b9afd Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Mon, 26 Apr 2021 13:29:28 +0900 Subject: [PATCH 268/666] HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel. 
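Editorial note ahead of the diff: this change converts DataNode logging to SLF4J parameterized messages, which defers message formatting until the level is known to be enabled and makes explicit isDebugEnabled() guards unnecessary for simple arguments. The standalone class below is a minimal sketch of that style, not code from the patch; the class and method names are hypothetical, and only standard SLF4J calls are used.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  public void report(String block, Exception cause) {
    // Old style: concatenation builds the message string even when WARN
    // logging is disabled for this logger.
    // LOG.warn("Failed to transfer block " + block, cause);

    // Style used by the patch: the {} placeholder is substituted lazily, and
    // a trailing Throwable argument still has its stack trace logged.
    LOG.warn("Failed to transfer block {}", block, cause);
  }
}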
--- .../hadoop/hdfs/server/datanode/DataNode.java | 51 +++++++------------ .../fsdataset/impl/FsDatasetImpl.java | 17 +++---- 2 files changed, 24 insertions(+), 44 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 742b815151d53..5a3a032a658f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1129,7 +1129,7 @@ private synchronized void initDirectoryScanner(Configuration conf) { directoryScanner = new DirectoryScanner(data, conf); directoryScanner.start(); } else { - LOG.info("Periodic Directory Tree Verification scan " + + LOG.warn("Periodic Directory Tree Verification scan " + "is disabled because {}", reason); } @@ -1331,21 +1331,6 @@ public void reportCorruptedBlocks( } } - /** - * Try to send an error report to the NNs associated with the given - * block pool. - * @param bpid the block pool ID - * @param errCode error code to send - * @param errMsg textual message to send - */ - void trySendErrorReport(String bpid, int errCode, String errMsg) { - BPOfferService bpos = blockPoolManager.get(bpid); - if (bpos == null) { - throw new IllegalArgumentException("Bad block pool: " + bpid); - } - bpos.trySendErrorReport(errCode, errMsg); - } - /** * Return the BPOfferService instance corresponding to the given block. * @return the BPOS @@ -2033,7 +2018,7 @@ private void checkBlockToken(ExtendedBlock block, ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); DataInputStream in = new DataInputStream(buf); id.readFields(in); - LOG.debug("Got: {}", id); + LOG.debug("BlockTokenIdentifier id: {}", id); blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode, null, null); } @@ -2256,8 +2241,8 @@ private void handleDiskError(String failedVolumes, int failedNumber) { return; // do not shutdown } - LOG.warn("DataNode is shutting down due to failed volumes: [" - + failedVolumes + "]"); + LOG.warn("DataNode is shutting down due to failed volumes: [{}]", + failedVolumes); shouldRun = false; } @@ -2299,7 +2284,7 @@ void incrDatanodeNetworkErrors(String host) { curCount.put("networkErrors", curCount.get("networkErrors") + 1L); datanodeNetworkCounts.put(host, curCount); } catch (ExecutionException e) { - LOG.warn("failed to increment network error counts for " + host); + LOG.warn("failed to increment network error counts for host: {}", host); } } } @@ -2349,7 +2334,7 @@ private void reportBadBlock(final BPOfferService bpos, final ExtendedBlock block, final String msg) { FsVolumeSpi volume = getFSDataset().getVolume(block); if (volume == null) { - LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block); + LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block); return; } bpos.reportBadBlocks( @@ -2430,7 +2415,7 @@ void transferBlocks(String poolId, Block blocks[], transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i], xferTargetStorageTypes[i], xferTargetStorageIDs[i]); } catch (IOException ie) { - LOG.warn("Failed to transfer block " + blocks[i], ie); + LOG.warn("Failed to transfer block {}", blocks[i], ie); } } } @@ -2549,15 +2534,13 @@ private class DataTransfer implements Runnable { DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes, String[] targetStorageIds, ExtendedBlock 
b, BlockConstructionStage stage, final String clientname) { - if (DataTransferProtocol.LOG.isDebugEnabled()) { - DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " + - "clientname={}, targets={}, target storage types={}, " + - "target storage IDs={}", getClass().getSimpleName(), b, - b.getNumBytes(), stage, clientname, Arrays.asList(targets), - targetStorageTypes == null ? "[]" : - Arrays.asList(targetStorageTypes), - targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds)); - } + DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " + + "clientname={}, targets={}, target storage types={}, " + + "target storage IDs={}", getClass().getSimpleName(), b, + b.getNumBytes(), stage, clientname, Arrays.asList(targets), + targetStorageTypes == null ? "[]" : + Arrays.asList(targetStorageTypes), + targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds)); this.targets = targets; this.targetStorageTypes = targetStorageTypes; this.targetStorageIds = targetStorageIds; @@ -2661,7 +2644,7 @@ public void run() { LOG.warn("{}:Failed to transfer {} to {} got", bpReg, b, targets[0], ie); } catch (Throwable t) { - LOG.error("Failed to transfer block " + b, t); + LOG.error("Failed to transfer block {}", b, t); } finally { decrementXmitsInProgress(); IOUtils.closeStream(blockSender); @@ -3103,7 +3086,7 @@ private void checkReadAccess(final ExtendedBlock block) throws IOException { } for (TokenIdentifier tokenId : tokenIds) { BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId; - LOG.debug("Got: {}", id); + LOG.debug("BlockTokenIdentifier: {}", id); blockPoolTokenSecretManager.checkAccess(id, null, block, BlockTokenIdentifier.AccessMode.READ, null, null); } @@ -3143,8 +3126,10 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b, b.setGenerationStamp(storedGS); if (data.isValidRbw(b)) { stage = BlockConstructionStage.TRANSFER_RBW; + LOG.debug("Replica is being written!"); } else if (data.isValidBlock(b)) { stage = BlockConstructionStage.TRANSFER_FINALIZED; + LOG.debug("Replica is finalized!"); } else { final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId()); throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index d148f774b98b2..0e7b89a4c4d87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -2262,9 +2262,7 @@ ReplicaInfo validateBlockFile(String bpid, long blockId) { datanode.checkDiskErrorAsync(r.getVolume()); } - if (LOG.isDebugEnabled()) { - LOG.debug("blockId=" + blockId + ", replica=" + r); - } + LOG.debug("blockId={}, replica={}", blockId, r); return null; } @@ -2334,15 +2332,12 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async) continue; } } catch(IllegalArgumentException e) { - LOG.warn("Parent directory check failed; replica " + info - + " is not backed by a local file"); + LOG.warn("Parent directory check failed; replica {} is " + + "not backed by a local file", info); } removing = volumeMap.remove(bpid, invalidBlks[i]); addDeletingBlock(bpid, removing.getBlockId()); - if (LOG.isDebugEnabled()) { - LOG.debug("Block file " 
+ removing.getBlockURI() - + " is to be deleted"); - } + LOG.debug("Block file {} is to be deleted", removing.getBlockURI()); if (removing instanceof ReplicaInPipeline) { ((ReplicaInPipeline) removing).releaseAllBytesReserved(); } @@ -2383,8 +2378,8 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async) dataStorage.getTrashDirectoryForReplica(bpid, removing)); } } catch (ClosedChannelException e) { - LOG.warn("Volume " + v + " is closed, ignore the deletion task for " + - "block " + invalidBlks[i]); + LOG.warn("Volume {} is closed, ignore the deletion task for " + + "block: {}", v, invalidBlks[i]); } } if (!errors.isEmpty()) { From 6fd40c6400b9c50296a41f24136a8813d8a9ad87 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Sun, 25 Apr 2021 23:04:52 -0700 Subject: [PATCH 269/666] HADOOP-17661. mvn versions:set fails to parse pom.xml. (#2952) Reviewed-by: Akira Ajisaka Reviewed-by: Hui Fei --- hadoop-project/pom.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e76a1d65f2ea9..b6854cea2e500 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -2294,11 +2294,11 @@ true ${javac.version} ${javac.version} - - - - 9999 - + + -Xlint + -Xlint:unchecked + -Xmaxwarns=9999 + From 6a7a79d069ee6ee9040becba45c2c549277dfe9c Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 26 Apr 2021 12:18:39 +0530 Subject: [PATCH 270/666] HADOOP-17650. Bump solr to unblock build failure with Maven 3.8.1 (#2939) Reviewed-by: Siyao Meng --- hadoop-project/pom.xml | 2 +- .../pom.xml | 20 +++++++++++++++++++ .../EmbeddedSolrServerFactory.java | 4 +--- .../application/TestAppCatalogSolrClient.java | 2 +- 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index b6854cea2e500..33289b97d992f 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -206,7 +206,7 @@ 3.12.2 3.9.0 1.5.6 - 7.7.0 + 8.8.2 1.0.7.Final 1.0.2 5.3.0 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml index d9481bfee894a..e790cc8cefc70 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml @@ -110,6 +110,18 @@ org.slf4j slf4j-api + + org.eclipse.jetty + jetty-http + + + org.eclipse.jetty + jetty-client + + + org.eclipse.jetty + jetty-io + @@ -134,6 +146,10 @@ org.eclipse.jetty jetty-http + + org.eclipse.jetty + jetty-client + test @@ -159,6 +175,10 @@ org.eclipse.jetty jetty-http + + org.eclipse.jetty + jetty-client + test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/EmbeddedSolrServerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/EmbeddedSolrServerFactory.java index 2a4b33853252b..eb78266695988 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/EmbeddedSolrServerFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/EmbeddedSolrServerFactory.java @@ -82,12 +82,10 @@ public static SolrClient create(final String solrHome, solrHomeDir.mkdirs(); } - final SolrResourceLoader loader = new SolrResourceLoader( - solrHomeDir.toPath()); final Path configSetPath = Paths.get(configSetHome).toAbsolutePath(); final NodeConfig config = new NodeConfig.NodeConfigBuilder( - "embeddedSolrServerNode", loader) + "embeddedSolrServerNode", solrHomeDir.toPath()) .setConfigSetBaseDirectory(configSetPath.toString()).build(); final EmbeddedSolrServer embeddedSolrServer = new EmbeddedSolrServer(config, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/TestAppCatalogSolrClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/TestAppCatalogSolrClient.java index d902de5c8cdc2..37a382021ea81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/TestAppCatalogSolrClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/test/java/org/apache/hadoop/yarn/appcatalog/application/TestAppCatalogSolrClient.java @@ -47,7 +47,7 @@ public void setup() throws Exception { String targetLocation = EmbeddedSolrServerFactory.class .getProtectionDomain().getCodeSource().getLocation().getFile() + "/.."; - String solrHome = targetLocation + "/solr"; + String solrHome = targetLocation.split("/test-classes")[0] + "/solr"; solrClient = EmbeddedSolrServerFactory.create(solrHome, CONFIGSET_DIR, "exampleCollection"); spy = PowerMockito.spy(new AppCatalogSolrClient()); From b642354606f7f8234023f54b5d356cab1a8bd290 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 26 Apr 2021 16:42:32 +0800 Subject: [PATCH 271/666] Revert "HADOOP-17661. mvn versions:set fails to parse pom.xml. (#2952)" This reverts commit 66dda86fecee966c7b0625b19f700a43db9787a3. --- hadoop-project/pom.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 33289b97d992f..9d47bbfa098d8 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -2294,11 +2294,11 @@ true ${javac.version} ${javac.version} - - -Xlint - -Xlint:unchecked - -Xmaxwarns=9999 - + + + + 9999 + From ddaffdce2dd4ead129f2cb841da4c5d464b19ca9 Mon Sep 17 00:00:00 2001 From: litao Date: Mon, 26 Apr 2021 17:38:43 +0800 Subject: [PATCH 272/666] HDFS-15991. 
Add location into datanode info for NameNodeMXBean (#2933) Signed-off-by: Takanobu Asanuma --- .../main/webapps/router/federationhealth.html | 2 +- .../hdfs/server/namenode/FSNamesystem.java | 4 +++ .../src/main/webapps/hdfs/dfshealth.html | 10 ++++---- .../server/namenode/TestNameNodeMXBean.java | 25 +++++++++++++++++-- 4 files changed, 33 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index 01751c21b1376..87d0e718bdcd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -406,7 +406,7 @@ {#EnteringMaintenanceNodes} - {name} ({xferaddr}) + {location}/{name} ({xferaddr}) {underReplicatedBlocks} {maintenanceOnlyReplicas} {underReplicateInOpenFiles} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 926c4e6482414..e789f12264ca3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6485,6 +6485,7 @@ public String getLiveNodes() { .put("infoAddr", node.getInfoAddr()) .put("infoSecureAddr", node.getInfoSecureAddr()) .put("xferaddr", node.getXferAddr()) + .put("location", node.getNetworkLocation()) .put("lastContact", getLastContact(node)) .put("usedSpace", getDfsUsed(node)) .put("adminState", node.getAdminState().toString()) @@ -6534,6 +6535,7 @@ public String getDeadNodes() { .put("decommissioned", node.isDecommissioned()) .put("adminState", node.getAdminState().toString()) .put("xferaddr", node.getXferAddr()) + .put("location", node.getNetworkLocation()) .build(); info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo); } @@ -6555,6 +6557,7 @@ public String getDecomNodes() { Map innerinfo = ImmutableMap . builder() .put("xferaddr", node.getXferAddr()) + .put("location", node.getNetworkLocation()) .put("underReplicatedBlocks", node.getLeavingServiceStatus().getUnderReplicatedBlocks()) .put("decommissionOnlyReplicas", @@ -6582,6 +6585,7 @@ public String getEnteringMaintenanceNodes() { Map attrMap = ImmutableMap . 
builder() .put("xferaddr", node.getXferAddr()) + .put("location", node.getNetworkLocation()) .put("underReplicatedBlocks", node.getLeavingServiceStatus().getUnderReplicatedBlocks()) .put("maintenanceOnlyReplicas", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 8622e4d3a5681..c3ba37165c998 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -326,7 +326,7 @@ {#LiveNodes} {state} - {name} ({xferaddr}) + {location}/{name} ({xferaddr}) {dnWebAddress} {lastContact}s {lastBlockReport}m @@ -349,7 +349,7 @@ {#DeadNodes} {state} - {name} ({xferaddr}) + {location}/{name} ({xferaddr}) {#helper_relative_time value="{lastContact}"/} @@ -378,7 +378,7 @@ {#EnteringMaintenanceNodes} - {name} ({xferaddr}) + {location}/{name} ({xferaddr}) {underReplicatedBlocks} {maintenanceOnlyReplicas} {underReplicateInOpenFiles} @@ -404,7 +404,7 @@ {#DecomNodes} - {name} ({xferaddr}) + {location}/{name} ({xferaddr}) {underReplicatedBlocks} {decommissionOnlyReplicas} {underReplicateInOpenFiles} @@ -433,7 +433,7 @@ {#LiveNodes} - {name} ({xferaddr}) + {location}/{name} ({xferaddr}) {#helper_date_tostring value="{lastVolumeFailureDate}"/} {volfails} {estimatedCapacityLostTotal|fmt_bytes} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index a309e324f5485..81c9cb8670066 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -112,7 +112,7 @@ public void testNameNodeMXBeanInfo() throws Exception { MiniDFSCluster cluster = null; try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); cluster.waitActive(); // Set upgrade domain on the first DN. @@ -171,7 +171,7 @@ public void testNameNodeMXBeanInfo() throws Exception { "LiveNodes")); Map> liveNodes = (Map>) JSON.parse(alivenodeinfo); - assertTrue(liveNodes.size() == 2); + assertTrue(liveNodes.size() == 4); for (Map liveNode : liveNodes.values()) { assertTrue(liveNode.containsKey("nonDfsUsedSpace")); assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) >= 0); @@ -195,6 +195,27 @@ public void testNameNodeMXBeanInfo() throws Exception { assertFalse(xferAddr.equals(dnXferAddrInMaintenance) ^ inMaintenance); } assertEquals(fsn.getLiveNodes(), alivenodeinfo); + + // Put the third DN to decommissioning state. + DatanodeDescriptor decommissioningNode = dm.getDatanode( + cluster.getDataNodes().get(2).getDatanodeId()); + decommissioningNode.startDecommission(); + + // Put the fourth DN to decommissioned state. 
+ DatanodeDescriptor decommissionedNode = dm.getDatanode( + cluster.getDataNodes().get(3).getDatanodeId()); + decommissionedNode.setDecommissioned(); + + // Assert the location field is included in the mxbeanName + // under different states + String alivenodeinfo1 = (String) (mbs.getAttribute(mxbeanName, + "LiveNodes")); + Map> liveNodes1 = + (Map>) JSON.parse(alivenodeinfo1); + for (Map liveNode : liveNodes1.values()) { + assertTrue(liveNode.containsKey("location")); + } + // get attributes DeadNodes String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes")); From 4736cebcb8c1bc6af024c1b9469602e9fb31f498 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Mon, 26 Apr 2021 11:00:23 +0100 Subject: [PATCH 273/666] HDFS-15621. Datanode DirectoryScanner uses excessive memory (#2849). Contributed by Stephen O'Donnell --- .../server/datanode/DirectoryScanner.java | 2 +- .../datanode/fsdataset/FsVolumeSpi.java | 118 ++++++++---------- .../datanode/fsdataset/impl/FsVolumeImpl.java | 5 +- .../server/datanode/TestDirectoryScanner.java | 37 +++--- .../fsdataset/impl/TestFsDatasetImpl.java | 4 +- 5 files changed, 77 insertions(+), 89 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 63865f69f29f1..a3bceec9a96a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -582,7 +582,7 @@ private void addDifference(Collection diffRecord, Stats statsRecord, long blockId, FsVolumeSpi vol) { statsRecord.missingBlockFile++; statsRecord.missingMetaFile++; - diffRecord.add(new ScanInfo(blockId, null, null, vol)); + diffRecord.add(new ScanInfo(blockId, null, null, null, vol)); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java index 68d1a15d5c3b7..8ae204364f05a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java @@ -227,27 +227,27 @@ interface BlockIterator extends Closeable { */ public static class ScanInfo implements Comparable { private final long blockId; - /** - * The block file path, relative to the volume's base directory. - * If there was no block file found, this may be null. If 'vol' - * is null, then this is the full path of the block file. + * The full path to the folder containing the block / meta files. */ - private final String blockSuffix; - + private final File basePath; /** - * The suffix of the meta file path relative to the block file. - * If blockSuffix is null, then this will be the entire path relative - * to the volume base directory, or an absolute path if vol is also - * null. + * The block file name, with no path */ - private final String metaSuffix; + private final String blockFile; + /** + * Holds the meta file name, with no path, only if blockFile is null. + * If blockFile is not null, the meta file will be named identically to + * the blockFile, but with a suffix like "_1234.meta". 
If the blockFile + * is present, we store only the meta file suffix. + */ + private final String metaFile; private final FsVolumeSpi volume; private final FileRegion fileRegion; /** - * Get the file's length in async block scan + * Get the file's length in async block scan. */ private final long blockLength; @@ -257,35 +257,19 @@ public static class ScanInfo implements Comparable { private final static String QUOTED_FILE_SEPARATOR = Matcher.quoteReplacement(File.separator); - /** - * Get the most condensed version of the path. - * - * For example, the condensed version of /foo//bar is /foo/bar - * Unlike {@link File#getCanonicalPath()}, this will never perform I/O - * on the filesystem. - * - * @param path the path to condense - * @return the condensed path - */ - private static String getCondensedPath(String path) { - return CONDENSED_PATH_REGEX.matcher(path). - replaceAll(QUOTED_FILE_SEPARATOR); - } - /** * Get a path suffix. * - * @param f The file to get the suffix for. + * @param f The string to get the suffix for. * @param prefix The prefix we're stripping off. * - * @return A suffix such that prefix + suffix = path to f + * @return A suffix such that prefix + suffix = f */ - private static String getSuffix(File f, String prefix) { - String fullPath = getCondensedPath(f.getAbsolutePath()); - if (fullPath.startsWith(prefix)) { - return fullPath.substring(prefix.length()); + private static String getSuffix(String f, String prefix) { + if (f.startsWith(prefix)) { + return f.substring(prefix.length()); } - throw new RuntimeException(prefix + " is not a prefix of " + fullPath); + throw new RuntimeException(prefix + " is not a prefix of " + f); } /** @@ -293,27 +277,27 @@ private static String getSuffix(File f, String prefix) { * the block data and meta-data files. * * @param blockId the block ID - * @param blockFile the path to the block data file - * @param metaFile the path to the block meta-data file + * @param basePath The full path to the directory the block is stored in + * @param blockFile The block filename, with no path + * @param metaFile The meta filename, with no path. If blockFile is not null + * then the metaFile and blockFile should have the same + * prefix, with the meta file having a suffix like + * "_1234.meta". To save memory, if the blockFile is present + * we store only the meta file suffix in the object * @param vol the volume that contains the block */ - public ScanInfo(long blockId, File blockFile, File metaFile, - FsVolumeSpi vol) { + public ScanInfo(long blockId, File basePath, String blockFile, + String metaFile, FsVolumeSpi vol) { this.blockId = blockId; - String condensedVolPath = - (vol == null || vol.getBaseURI() == null) ? null : - getCondensedPath(new File(vol.getBaseURI()).getAbsolutePath()); - this.blockSuffix = blockFile == null ? null : - getSuffix(blockFile, condensedVolPath); - this.blockLength = (blockFile != null) ? blockFile.length() : 0; - if (metaFile == null) { - this.metaSuffix = null; - } else if (blockFile == null) { - this.metaSuffix = getSuffix(metaFile, condensedVolPath); + this.basePath = basePath; + this.blockFile = blockFile; + if (blockFile != null && metaFile != null) { + this.metaFile = getSuffix(metaFile, blockFile); } else { - this.metaSuffix = getSuffix(metaFile, - condensedVolPath + blockSuffix); + this.metaFile = metaFile; } + this.blockLength = (blockFile != null) ? 
+ new File(basePath, blockFile).length() : 0; this.volume = vol; this.fileRegion = null; } @@ -333,8 +317,9 @@ public ScanInfo(long blockId, FsVolumeSpi vol, FileRegion fileRegion, this.blockLength = length; this.volume = vol; this.fileRegion = fileRegion; - this.blockSuffix = null; - this.metaSuffix = null; + this.basePath = null; + this.blockFile = null; + this.metaFile = null; } /** @@ -343,8 +328,8 @@ public ScanInfo(long blockId, FsVolumeSpi vol, FileRegion fileRegion, * @return the block data file */ public File getBlockFile() { - return (blockSuffix == null) ? null : - new File(new File(volume.getBaseURI()).getAbsolutePath(), blockSuffix); + return (blockFile == null) ? null : + new File(basePath.getAbsolutePath(), blockFile); } /** @@ -363,15 +348,10 @@ public long getBlockLength() { * @return the block meta data file */ public File getMetaFile() { - if (metaSuffix == null) { + if (metaFile == null) { return null; } - String fileSuffix = metaSuffix; - if (blockSuffix != null) { - fileSuffix = blockSuffix + metaSuffix; - } - return new File(new File(volume.getBaseURI()).getAbsolutePath(), - fileSuffix); + return new File(basePath.getAbsolutePath(), fullMetaFile()); } /** @@ -414,14 +394,24 @@ public int hashCode() { } public long getGenStamp() { - return metaSuffix != null ? Block.getGenerationStamp( - getMetaFile().getName()) : - HdfsConstants.GRANDFATHER_GENERATION_STAMP; + return metaFile != null ? Block.getGenerationStamp(fullMetaFile()) + : HdfsConstants.GRANDFATHER_GENERATION_STAMP; } public FileRegion getFileRegion() { return fileRegion; } + + private String fullMetaFile() { + if (metaFile == null) { + return null; + } + if (blockFile == null) { + return metaFile; + } else { + return blockFile + metaFile; + } + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 6681f6fd64c0b..1dda8584f56ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -1451,7 +1451,7 @@ private void compileReport(File bpFinalizedDir, File dir, long blockId = Block.getBlockId(file.getName()); verifyFileLocation(file, bpFinalizedDir, blockId); - report.add(new ScanInfo(blockId, null, file, this)); + report.add(new ScanInfo(blockId, dir, null, fileNames.get(i), this)); } continue; } @@ -1474,7 +1474,8 @@ private void compileReport(File bpFinalizedDir, File dir, } } verifyFileLocation(blockFile, bpFinalizedDir, blockId); - report.add(new ScanInfo(blockId, blockFile, metaFile, this)); + report.add(new ScanInfo(blockId, dir, blockFile.getName(), + metaFile == null ? 
null : metaFile.getName(), this)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 44d99a292b4b6..e2a15a8da039a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -1040,19 +1040,21 @@ public VolumeCheckResult check(VolumeCheckContext context) private final static String BPID_2 = "BP-367845636-127.0.0.1-5895645674231"; - void testScanInfoObject(long blockId, File blockFile, File metaFile) + void testScanInfoObject(long blockId, File baseDir, String blockFile, + String metaFile) throws Exception { FsVolumeSpi.ScanInfo scanInfo = - new FsVolumeSpi.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME); + new FsVolumeSpi.ScanInfo(blockId, baseDir, blockFile, metaFile, + TEST_VOLUME); assertEquals(blockId, scanInfo.getBlockId()); if (blockFile != null) { - assertEquals(blockFile.getAbsolutePath(), + assertEquals(new File(baseDir, blockFile).getAbsolutePath(), scanInfo.getBlockFile().getAbsolutePath()); } else { assertNull(scanInfo.getBlockFile()); } if (metaFile != null) { - assertEquals(metaFile.getAbsolutePath(), + assertEquals(new File(baseDir, metaFile).getAbsolutePath(), scanInfo.getMetaFile().getAbsolutePath()); } else { assertNull(scanInfo.getMetaFile()); @@ -1062,7 +1064,7 @@ void testScanInfoObject(long blockId, File blockFile, File metaFile) void testScanInfoObject(long blockId) throws Exception { FsVolumeSpi.ScanInfo scanInfo = - new FsVolumeSpi.ScanInfo(blockId, null, null, null); + new FsVolumeSpi.ScanInfo(blockId, null, null, null, null); assertEquals(blockId, scanInfo.getBlockId()); assertNull(scanInfo.getBlockFile()); assertNull(scanInfo.getMetaFile()); @@ -1071,24 +1073,19 @@ void testScanInfoObject(long blockId) throws Exception { @Test(timeout = 120000) public void TestScanInfo() throws Exception { testScanInfoObject(123, - new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(), - "blk_123"), - new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(), - "blk_123__1001.meta")); + new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath()), + "blk_123", "blk_123__1001.meta"); testScanInfoObject(464, - new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(), - "blk_123"), - null); - testScanInfoObject(523, null, - new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(), - "blk_123__1009.meta")); - testScanInfoObject(789, null, null); + new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath()), + "blk_123", null); + testScanInfoObject(523, + new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath()), + null, "blk_123__1009.meta"); + testScanInfoObject(789, null, null, null); testScanInfoObject(456); testScanInfoObject(123, - new File(TEST_VOLUME.getFinalizedDir(BPID_2).getAbsolutePath(), - "blk_567"), - new File(TEST_VOLUME.getFinalizedDir(BPID_2).getAbsolutePath(), - "blk_567__1004.meta")); + new File(TEST_VOLUME.getFinalizedDir(BPID_2).getAbsolutePath()), + "blk_567", "blk_567__1004.meta"); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java 
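
The ScanInfo rework above trades the old per-block absolute-path suffix strings for one File shared per directory plus bare file names, keeping only the meta-file suffix when a block file is present. A minimal standalone sketch of that layout, using illustrative names rather than the real Hadoop classes:

// Illustrative sketch (not the actual Hadoop ScanInfo): one File shared per
// directory, the bare block file name, and only the meta-file suffix when a
// block file exists, mirroring the memory layout introduced by HDFS-15621.
import java.io.File;

class CompactScanInfo {
  private final File baseDir;      // shared per scanned directory
  private final String blockFile;  // e.g. "blk_123", no path component
  private final String metaFile;   // suffix such as "__1001.meta" when blockFile != null

  CompactScanInfo(File baseDir, String blockFile, String metaFile) {
    this.baseDir = baseDir;
    this.blockFile = blockFile;
    // Assumes the meta file name starts with the block file name.
    this.metaFile = (blockFile != null && metaFile != null)
        ? metaFile.substring(blockFile.length()) : metaFile;
  }

  File getBlockFile() {
    return blockFile == null ? null : new File(baseDir, blockFile);
  }

  File getMetaFile() {
    if (metaFile == null) {
      return null;
    }
    return new File(baseDir, blockFile == null ? metaFile : blockFile + metaFile);
  }

  public static void main(String[] args) {
    CompactScanInfo info = new CompactScanInfo(
        new File("/data/dn/current"), "blk_123", "blk_123__1001.meta");
    System.out.println(info.getBlockFile() + " and " + info.getMetaFile());
  }
}
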
index 778ef97180b41..fbd9f005e3895 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -1786,8 +1786,8 @@ public void testNotifyNamenodeMissingOrNewBlock() throws Exception { assertFalse(metaFile.exists()); FsVolumeSpi.ScanInfo info = new FsVolumeSpi.ScanInfo( - replicaInfo.getBlockId(), blockFile.getAbsoluteFile(), - metaFile.getAbsoluteFile(), replicaInfo.getVolume()); + replicaInfo.getBlockId(), blockFile.getParentFile().getAbsoluteFile(), + blockFile.getName(), metaFile.getName(), replicaInfo.getVolume()); fsdataset.checkAndUpdate(bpid, info); BlockManager blockManager = cluster.getNameNode(). From 88706b6c058e028a70ad4ef4e5ba3c4453f1b96c Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Sat, 24 Apr 2021 10:10:10 +0200 Subject: [PATCH 274/666] YARN-10637. fs2cs: add queue autorefresh policy during conversion. Contributed by Qi Zhu. --- .../fair/converter/FSYarnSiteConverter.java | 6 ++++++ .../fair/converter/TestFSYarnSiteConverter.java | 14 ++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java index 401c056cc999e..4f9029a79d0fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java @@ -24,6 +24,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedQueueDeletionPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueConfigurationAutoRefreshPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; @@ -111,6 +112,11 @@ public void convertSiteProperties(Configuration conf, CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false); } + // Make auto cs conf refresh enabled. 
+ yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, + addMonitorPolicy(QueueConfigurationAutoRefreshPolicy + .class.getCanonicalName(), yarnSiteConfig)); + int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN, FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN); if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java index 55ac242332309..4498373d73d01 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java @@ -21,6 +21,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedQueueDeletionPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueConfigurationAutoRefreshPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; @@ -69,6 +70,19 @@ public void testSiteContinuousSchedulingConversion() { ".scheduling-interval-ms", -1)); } + @Test + public void testSiteQueueConfAutoRefreshConversion() { + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false, false, null); + assertTrue(yarnConvertedConfig.get(YarnConfiguration. + RM_SCHEDULER_ENABLE_MONITORS), true); + assertTrue("Scheduling Policies contains queue conf auto refresh", + yarnConvertedConfig. + get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) + .contains(QueueConfigurationAutoRefreshPolicy. 
+ class.getCanonicalName())); + } + @Test public void testSitePreemptionConversion() { yarnConfig.setBoolean(FairSchedulerConfiguration.PREEMPTION, true); From 6f2e972909a2ea2cf4ff27e269da049f2ab477e9 Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Sat, 24 Apr 2021 16:58:45 +0530 Subject: [PATCH 275/666] changes for noop tracer --- hadoop-common-project/hadoop-common/pom.xml | 12 ------------ .../main/java/org/apache/hadoop/tracing/Tracer.java | 3 +-- hadoop-project/pom.xml | 10 ---------- 3 files changed, 1 insertion(+), 24 deletions(-) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1acb39f056c2d..faf1511e57f23 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -386,18 +386,6 @@ opentelemetry-sdk-extension-autoconfigure ${opentelemetry.version}-alpha - - io.opentelemetry - opentelemetry-exporter-jaeger - ${opentelemetry.version} - test - - - io.grpc - grpc-netty-shaded - 1.26.0 - test - diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index 5fe885f0ec798..b53f5c692d592 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -106,8 +106,7 @@ public Builder conf(TraceConfiguration conf) { static OpenTelemetry initialiseTracer(String name) { //TODO: Explore the possibility of moving these properties to config or environment variables System.setProperty("otel.metrics.exporter", "none"); - System.setProperty("otel.traces.exporter", "jaeger"); - System.setProperty("otel.exporter.jaeger.endpoint", "http://localhost:14250"); + System.setProperty("otel.traces.exporter", "none"); System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); return openTelemetry; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 9d47bbfa098d8..d6a67df81dfb5 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1757,16 +1757,6 @@ lz4-java ${lz4-java.version} - - io.opentelemetry - opentelemetry-exporter-jaeger - ${opentelemetry.version} - - - io.grpc - grpc-netty-shaded - 1.26.0 - From a0760bbdbe982ef9d88e3d3ba215252f7f712bff Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 26 Apr 2021 18:42:42 -0700 Subject: [PATCH 276/666] HADOOP-17661. mvn versions:set fails to parse pom.xml. (#2956) Signed-off-by: Akira Ajisaka Change-Id: I7de255cb9a167e3a5e25ae339b050e82cb4b7c30 --- hadoop-project/pom.xml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index d6a67df81dfb5..c93afaeff50db 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -2284,11 +2284,12 @@ true ${javac.version} ${javac.version} - - - - 9999 - + + -Xlint + -Xlint:unchecked + -Xmaxwarns + 9999 + From cb416d937e818e3527fb4b94bfa518600ac894e4 Mon Sep 17 00:00:00 2001 From: bshashikant Date: Tue, 27 Apr 2021 09:15:06 +0530 Subject: [PATCH 277/666] HDFS-15961. 
standby namenode failed to start ordered snapshot deletion is enabled while having snapshottable directories (#2881) --- .../hdfs/server/namenode/FSNamesystem.java | 39 +++++++++++++------ .../hadoop/hdfs/server/namenode/NameNode.java | 1 + .../server/namenode/ha/TestHASafeMode.java | 37 ++++++++++++++++++ 3 files changed, 66 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e789f12264ca3..7b9efea538e18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -124,7 +124,12 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import org.apache.hadoop.ipc.ObserverRetryOnActiveException; -import org.apache.hadoop.util.*; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; import static org.apache.hadoop.util.Time.now; import static org.apache.hadoop.util.Time.monotonicNow; @@ -8560,25 +8565,36 @@ void checkAccess(String src, FsAction mode) throws IOException { /** * Check if snapshot roots are created for all existing snapshottable * directories. Create them if not. + * Only the active NameNode needs to execute this in HA setup once it is out + * of safe mode. + * + * The function gets called while exiting safe mode or post starting the + * services in Active NameNode, but comes into effect post whichever event + * happens later. */ @Override - public void checkAndProvisionSnapshotTrashRoots() { - if (isSnapshotTrashRootEnabled) { + public synchronized void checkAndProvisionSnapshotTrashRoots() { + if (isSnapshotTrashRootEnabled && (haEnabled && inActiveState() + || !haEnabled) && !blockManager.isInSafeMode()) { + SnapshottableDirectoryStatus dirStatus = null; try { SnapshottableDirectoryStatus[] dirStatusList = getSnapshottableDirListing(); if (dirStatusList == null) { return; } - for (SnapshottableDirectoryStatus dirStatus : dirStatusList) { + for (SnapshottableDirectoryStatus status : dirStatusList) { + dirStatus = status; String currDir = dirStatus.getFullPath().toString(); if (!currDir.endsWith(Path.SEPARATOR)) { currDir += Path.SEPARATOR; } String trashPath = currDir + FileSystem.TRASH_PREFIX; - HdfsFileStatus fileStatus = getFileInfo(trashPath, false, false, false); + HdfsFileStatus fileStatus = + getFileInfo(trashPath, false, false, false); if (fileStatus == null) { - LOG.info("Trash doesn't exist for snapshottable directory {}. " + "Creating trash at {}", currDir, trashPath); + LOG.info("Trash doesn't exist for snapshottable directory {}. " + + "Creating trash at {}", currDir, trashPath); PermissionStatus permissionStatus = new PermissionStatus(getRemoteUser().getShortUserName(), null, SHARED_TRASH_PERMISSION); @@ -8586,12 +8602,13 @@ public void checkAndProvisionSnapshotTrashRoots() { } } } catch (IOException e) { - final String msg = - "Could not provision Trash directory for existing " - + "snapshottable directories. 
Exiting Namenode."; - ExitUtil.terminate(1, msg); + if (dirStatus == null) { + LOG.error("Failed to get snapshottable directory list", e); + } else { + LOG.error("Could not provision Trash directory for existing " + + "snapshottable directory {}", dirStatus, e); + } } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index ed3d80bbfc201..4692229be2f4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -2021,6 +2021,7 @@ public HAState getState() { public void startActiveServices() throws IOException { try { namesystem.startActiveServices(); + namesystem.checkAndProvisionSnapshotTrashRoots(); startTrashEmptier(getConf()); } catch (Throwable t) { doImmediateShutdown(t); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index e17bb6f53cb49..562eb198fdb8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -98,6 +98,7 @@ public void setupCluster() throws Exception { conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); + conf.setBoolean("dfs.namenode.snapshot.trashroot.enabled", false); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) @@ -909,6 +910,42 @@ public Boolean get() { assertSafeMode(nn1, 3, 3, 3, 0); } + @Test + public void testNameNodeCreateSnapshotTrashRootOnHASetup() throws Exception { + DistributedFileSystem dfs = cluster.getFileSystem(0); + final Path testDir = new Path("/disallowss/test2/"); + final Path file0path = new Path(testDir, "file-0"); + dfs.create(file0path).close(); + dfs.allowSnapshot(testDir); + // .Trash won't be created right now since snapshot trash is disabled + final Path trashRoot = new Path(testDir, FileSystem.TRASH_PREFIX); + assertFalse(dfs.exists(trashRoot)); + // Set dfs.namenode.snapshot.trashroot.enabled=true + cluster.getNameNode(0).getConf() + .setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); + cluster.getNameNode(1).getConf() + .setBoolean("dfs.namenode.snapshot.trashroot.enabled", true); + restartActive(); + cluster.transitionToActive(1); + dfs = cluster.getFileSystem(1); + // Make sure .Trash path does not exist yet as on NN1 trash root is not + // enabled + assertFalse(dfs.exists(trashRoot)); + cluster.transitionToStandby(1); + cluster.transitionToActive(0); + dfs = cluster.getFileSystem(0); + // Check .Trash existence, should be created now + assertTrue(dfs.exists(trashRoot)); + assertFalse(cluster.getNameNode(0).isInSafeMode()); + restartStandby(); + // Ensure Standby namenode is up and running + assertTrue(cluster.getNameNode(1).isStandbyState()); + // Cleanup + dfs.delete(trashRoot, true); + dfs.disallowSnapshot(testDir); + dfs.delete(testDir, true); + } + /** * Test transition to active when namenode in safemode. 
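
The FSNamesystem change above makes snapshot trash-root provisioning an active-NameNode, out-of-safe-mode step that logs failures per directory instead of terminating the process. As a rough client-side sketch of the same idea, written against the public DistributedFileSystem API with illustrative names and error handling (not the namenode-internal code path):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class TrashRootProvisioner {
  // Ensure each snapshottable directory has a ".Trash" child; report and
  // continue on failure, matching the patched best-effort behaviour.
  public static void provision(DistributedFileSystem dfs) {
    try {
      SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
      if (dirs == null) {
        return;
      }
      for (SnapshottableDirectoryStatus dir : dirs) {
        Path trash = new Path(dir.getFullPath(), FileSystem.TRASH_PREFIX);
        if (!dfs.exists(trash)) {
          dfs.mkdirs(trash);
        }
      }
    } catch (IOException e) {
      System.err.println("Could not provision snapshot trash roots: " + e);
    }
  }
}
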
* From 870560b6d677e64c0eb3bb0d9d4798018a138115 Mon Sep 17 00:00:00 2001 From: bilaharith <52483117+bilaharith@users.noreply.github.com> Date: Tue, 27 Apr 2021 15:45:52 +0530 Subject: [PATCH 278/666] HADOOP-17536. ABFS: Supporting customer provided encryption key (#2707) Contributed by bilahari t h --- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 5 + .../azurebfs/constants/ConfigurationKeys.java | 2 + .../constants/FileSystemConfigurations.java | 2 + .../constants/HttpHeaderConfigurations.java | 5 + .../fs/azurebfs/services/AbfsClient.java | 66 +- .../fs/azurebfs/ITestCustomerProvidedKey.java | 936 ++++++++++++++++++ .../constants/TestConfigurationKeys.java | 4 + .../fs/azurebfs/services/TestAbfsClient.java | 6 +- 8 files changed, 1019 insertions(+), 7 deletions(-) create mode 100644 hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 0a8224aaaeb58..1c4a09be3c9a7 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -948,6 +948,11 @@ public boolean enableAbfsListIterator() { return this.enableAbfsListIterator; } + public String getClientProvidedEncryptionKey() { + String accSpecEncKey = accountConf(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY); + return rawConfig.get(accSpecEncKey, null); + } + @VisibleForTesting void setReadBufferSize(int bufferSize) { this.readBufferSize = bufferSize; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index 4fe1d1c276db5..2dbb2b9b08db8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -143,6 +143,8 @@ public final class ConfigurationKeys { public static final String AZURE_KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT = "fs.azure.shellkeyprovider.script"; /** Setting this true will make the driver use it's own RemoteIterator implementation */ public static final String FS_AZURE_ENABLE_ABFS_LIST_ITERATOR = "fs.azure.enable.abfslistiterator"; + /** Server side encryption key */ + public static final String FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY = "fs.azure.client-provided-encryption-key"; /** End point of ABFS account: {@value}. 
*/ public static final String AZURE_ABFS_ENDPOINT = "fs.azure.abfs.endpoint"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index 040b18ae4c281..dc4caa98a5e60 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -68,6 +68,8 @@ public final class FileSystemConfigurations { public static final String AZURE_BLOCK_LOCATION_HOST_DEFAULT = "localhost"; public static final int DEFAULT_AZURE_LIST_MAX_RESULTS = 5000; + public static final String SERVER_SIDE_ENCRYPTION_ALGORITHM = "AES256"; + public static final int MAX_CONCURRENT_READ_THREADS = 12; public static final int MAX_CONCURRENT_WRITE_THREADS = 8; public static final boolean DEFAULT_READ_TOLERATE_CONCURRENT_APPEND = false; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java index 232553844fcf3..d4065ac2836d0 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java @@ -60,6 +60,11 @@ public final class HttpHeaderConfigurations { public static final String X_MS_UMASK = "x-ms-umask"; public static final String X_MS_NAMESPACE_ENABLED = "x-ms-namespace-enabled"; public static final String X_MS_ABFS_CLIENT_LATENCY = "x-ms-abfs-client-latency"; + public static final String X_MS_ENCRYPTION_KEY = "x-ms-encryption-key"; + public static final String X_MS_ENCRYPTION_KEY_SHA256 = "x-ms-encryption-key-sha256"; + public static final String X_MS_ENCRYPTION_ALGORITHM = "x-ms-encryption-algorithm"; + public static final String X_MS_REQUEST_SERVER_ENCRYPTED = "x-ms-request-server-encrypted"; + public static final String X_MS_SERVER_ENCRYPTED = "x-ms-server-encrypted"; public static final String X_MS_LEASE_ACTION = "x-ms-lease-action"; public static final String X_MS_LEASE_DURATION = "x-ms-lease-duration"; public static final String X_MS_LEASE_ID = "x-ms-lease-id"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index 7c8a2112bfa46..c5c218d3fb257 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -25,8 +25,12 @@ import java.net.MalformedURLException; import java.net.URL; import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.time.Instant; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import java.util.Locale; import java.util.UUID; @@ -65,6 +69,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.*; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_DELETE_CONSIDERED_IDEMPOTENT; +import static 
org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.SERVER_SIDE_ENCRYPTION_ALGORITHM; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes.HTTPS_SCHEME; import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.*; import static org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams.*; @@ -74,6 +79,7 @@ */ public class AbfsClient implements Closeable { public static final Logger LOG = LoggerFactory.getLogger(AbfsClient.class); + private final URL baseUrl; private final SharedKeyCredentials sharedKeyCredentials; private final String xMsVersion = "2019-12-12"; @@ -82,6 +88,8 @@ public class AbfsClient implements Closeable { private final AbfsConfiguration abfsConfiguration; private final String userAgent; private final AbfsPerfTracker abfsPerfTracker; + private final String clientProvidedEncryptionKey; + private final String clientProvidedEncryptionKeySHA; private final String accountName; private final AuthType authType; @@ -93,7 +101,8 @@ public class AbfsClient implements Closeable { private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, - final AbfsClientContext abfsClientContext) { + final AbfsClientContext abfsClientContext) + throws IOException { this.baseUrl = baseUrl; this.sharedKeyCredentials = sharedKeyCredentials; String baseUrlString = baseUrl.toString(); @@ -103,6 +112,17 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCreden this.accountName = abfsConfiguration.getAccountName().substring(0, abfsConfiguration.getAccountName().indexOf(AbfsHttpConstants.DOT)); this.authType = abfsConfiguration.getAuthType(accountName); + String encryptionKey = this.abfsConfiguration + .getClientProvidedEncryptionKey(); + if (encryptionKey != null) { + this.clientProvidedEncryptionKey = getBase64EncodedString(encryptionKey); + this.clientProvidedEncryptionKeySHA = getBase64EncodedString( + getSHA256Hash(encryptionKey)); + } else { + this.clientProvidedEncryptionKey = null; + this.clientProvidedEncryptionKeySHA = null; + } + String sslProviderName = null; if (this.baseUrl.toString().startsWith(HTTPS_SCHEME)) { @@ -131,7 +151,8 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCreden public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, final AccessTokenProvider tokenProvider, - final AbfsClientContext abfsClientContext) { + final AbfsClientContext abfsClientContext) + throws IOException { this(baseUrl, sharedKeyCredentials, abfsConfiguration, abfsClientContext); this.tokenProvider = tokenProvider; } @@ -139,11 +160,29 @@ public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredent public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, final SASTokenProvider sasTokenProvider, - final AbfsClientContext abfsClientContext) { + final AbfsClientContext abfsClientContext) + throws IOException { this(baseUrl, sharedKeyCredentials, abfsConfiguration, abfsClientContext); this.sasTokenProvider = sasTokenProvider; } + private byte[] getSHA256Hash(String key) throws IOException { + try { + final MessageDigest digester = MessageDigest.getInstance("SHA-256"); + return digester.digest(key.getBytes(StandardCharsets.UTF_8)); + } catch (NoSuchAlgorithmException e) { + throw new IOException(e); + } + } + + private String getBase64EncodedString(String key) { + return 
getBase64EncodedString(key.getBytes(StandardCharsets.UTF_8)); + } + + private String getBase64EncodedString(byte[] bytes) { + return Base64.getEncoder().encodeToString(bytes); + } + @Override public void close() throws IOException { if (tokenProvider instanceof Closeable) { @@ -180,6 +219,18 @@ List createDefaultHeaders() { return requestHeaders; } + private void addCustomerProvidedKeyHeaders( + final List requestHeaders) { + if (clientProvidedEncryptionKey != null) { + requestHeaders.add( + new AbfsHttpHeader(X_MS_ENCRYPTION_KEY, clientProvidedEncryptionKey)); + requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_KEY_SHA256, + clientProvidedEncryptionKeySHA)); + requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_ALGORITHM, + SERVER_SIDE_ENCRYPTION_ALGORITHM)); + } + } + AbfsUriQueryBuilder createDefaultUriQueryBuilder() { final AbfsUriQueryBuilder abfsUriQueryBuilder = new AbfsUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_TIMEOUT, DEFAULT_TIMEOUT); @@ -289,6 +340,9 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin final String permission, final String umask, final boolean isAppendBlob, final String eTag) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); + if (isFile) { + addCustomerProvidedKeyHeaders(requestHeaders); + } if (!overwrite) { requestHeaders.add(new AbfsHttpHeader(IF_NONE_MATCH, AbfsHttpConstants.STAR)); } @@ -510,6 +564,7 @@ public AbfsRestOperation append(final String path, final byte[] buffer, AppendRequestParameters reqParams, final String cachedSasToken) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); + addCustomerProvidedKeyHeaders(requestHeaders); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, @@ -596,6 +651,7 @@ public AbfsRestOperation flush(final String path, final long position, boolean r boolean isClose, final String cachedSasToken, final String leaseId) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); + addCustomerProvidedKeyHeaders(requestHeaders); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, @@ -627,6 +683,7 @@ public AbfsRestOperation flush(final String path, final long position, boolean r public AbfsRestOperation setPathProperties(final String path, final String properties) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); + addCustomerProvidedKeyHeaders(requestHeaders); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, @@ -660,6 +717,8 @@ public AbfsRestOperation getPathStatus(final String path, final boolean includeP // only traversal (execute) permission is required. 
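
The key handling above reduces to two derived values sent on data-path requests: the Base64-encoded key and the Base64-encoded SHA-256 hash of the key, alongside a fixed AES256 algorithm header. A self-contained sketch of that derivation using only the JDK (the example key is a placeholder):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

public final class CpkHeaderDemo {
  public static void main(String[] args) throws NoSuchAlgorithmException {
    String key = "12345678901234567890123456789012";   // 32-byte example key
    byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
    String encodedKey = Base64.getEncoder().encodeToString(keyBytes);
    byte[] sha256 = MessageDigest.getInstance("SHA-256").digest(keyBytes);
    String encodedKeySha = Base64.getEncoder().encodeToString(sha256);

    System.out.println("x-ms-encryption-key: " + encodedKey);
    System.out.println("x-ms-encryption-key-sha256: " + encodedKeySha);
    System.out.println("x-ms-encryption-algorithm: AES256");
  }
}
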
abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.GET_STATUS); operation = SASTokenProvider.GET_STATUS_OPERATION; + } else { + addCustomerProvidedKeyHeaders(requestHeaders); } abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_UPN, String.valueOf(abfsConfiguration.isUpnUsed())); appendSASTokenToQuery(path, operation, abfsUriQueryBuilder); @@ -678,6 +737,7 @@ public AbfsRestOperation getPathStatus(final String path, final boolean includeP public AbfsRestOperation read(final String path, final long position, final byte[] buffer, final int bufferOffset, final int bufferLength, final String eTag, String cachedSasToken) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); + addCustomerProvidedKeyHeaders(requestHeaders); requestHeaders.add(new AbfsHttpHeader(RANGE, String.format("bytes=%d-%d", position, position + bufferLength - 1))); requestHeaders.add(new AbfsHttpHeader(IF_MATCH, eTag)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java new file mode 100644 index 0000000000000..9229905b4623c --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -0,0 +1,936 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 ("License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.EnumSet; +import java.util.Hashtable; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; + +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.assertj.core.api.Assertions; +import org.junit.Assume; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; +import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; +import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode; +import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpHeader; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.fs.azurebfs.utils.Base64; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.test.LambdaTestUtils; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; +import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_ALGORITHM; +import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_KEY; +import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_KEY_SHA256; +import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_REQUEST_SERVER_ENCRYPTED; +import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_SERVER_ENCRYPTED; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ABFS_ACCOUNT_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ACCOUNT_KEY; 
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_CPK_ENABLED; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT_KEY; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME; +import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry; +import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; +import static org.apache.hadoop.fs.permission.AclEntryType.USER; +import static org.apache.hadoop.fs.permission.FsAction.ALL; + +public class ITestCustomerProvidedKey extends AbstractAbfsIntegrationTest { + private static final Logger LOG = LoggerFactory + .getLogger(ITestCustomerProvidedKey.class); + + private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1"; + private static final int INT_512 = 512; + private static final int INT_50 = 50; + private static final int ENCRYPTION_KEY_LEN = 32; + private static final int FILE_SIZE = 10 * ONE_MB; + private static final int FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS = 24 * ONE_MB; + + public ITestCustomerProvidedKey() throws Exception { + boolean isCPKTestsEnabled = getConfiguration() + .getBoolean(FS_AZURE_TEST_CPK_ENABLED, false); + Assume.assumeTrue(isCPKTestsEnabled); + } + + @Test + public void testReadWithCPK() throws Exception { + final AzureBlobFileSystem fs = getAbfs(true); + String fileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, fileName, FILE_SIZE); + + AbfsClient abfsClient = fs.getAbfsClient(); + int length = FILE_SIZE; + byte[] buffer = new byte[length]; + final AbfsRestOperation op = abfsClient.getPathStatus(fileName, false); + final String eTag = op.getResult() + .getResponseHeader(HttpHeaderConfigurations.ETAG); + AbfsRestOperation abfsRestOperation = abfsClient + .read(fileName, 0, buffer, 0, length, eTag, null); + assertCPKHeaders(abfsRestOperation, true); + assertResponseHeader(abfsRestOperation, true, X_MS_ENCRYPTION_KEY_SHA256, + getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED, + "true"); + assertResponseHeader(abfsRestOperation, false, + X_MS_REQUEST_SERVER_ENCRYPTED, ""); + + // Trying to read with different CPK headers + Configuration conf = fs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "different-1234567890123456789012"); + try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + FSDataInputStream iStream = fs2.open(new Path(fileName))) { + int len = 8 * ONE_MB; + byte[] b = new byte[len]; + LambdaTestUtils.intercept(IOException.class, () -> { + iStream.read(b, 0, len); + }); + } + + // Trying to read with no CPK headers + conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName); + try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem + .get(conf); FSDataInputStream iStream = fs3.open(new Path(fileName))) { + int len = 8 * ONE_MB; + byte[] b = new byte[len]; + LambdaTestUtils.intercept(IOException.class, () -> { + iStream.read(b, 0, len); + }); + } + } + + @Test + public void testReadWithoutCPK() throws Exception { + final AzureBlobFileSystem fs = getAbfs(false); + String fileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, fileName, FILE_SIZE); + + AbfsClient abfsClient = fs.getAbfsClient(); + int length = INT_512; + byte[] buffer = new byte[length * 4]; + final AbfsRestOperation op = abfsClient.getPathStatus(fileName, false); + final String eTag = op.getResult() + .getResponseHeader(HttpHeaderConfigurations.ETAG); + AbfsRestOperation abfsRestOperation = abfsClient + .read(fileName, 0, buffer, 0, length, eTag, null); + assertCPKHeaders(abfsRestOperation, false); + assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256, + getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED, + "true"); + assertResponseHeader(abfsRestOperation, false, + X_MS_REQUEST_SERVER_ENCRYPTED, ""); + + // Trying to read with CPK headers + Configuration conf = fs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "12345678901234567890123456789012"); + + try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + AbfsClient abfsClient2 = fs2.getAbfsClient()) { + LambdaTestUtils.intercept(IOException.class, () -> { + abfsClient2.read(fileName, 0, buffer, 0, length, eTag, null); + }); + } + } + + @Test + public void testAppendWithCPK() throws Exception { + final AzureBlobFileSystem fs = getAbfs(true); + final String fileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, fileName, FILE_SIZE); + + // Trying to append with correct CPK headers + AppendRequestParameters appendRequestParameters = + new AppendRequestParameters( + 0, 0, 5, Mode.APPEND_MODE, false, null); + byte[] buffer = getRandomBytesArray(5); + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .append(fileName, buffer, appendRequestParameters, null); + assertCPKHeaders(abfsRestOperation, true); + assertResponseHeader(abfsRestOperation, true, X_MS_ENCRYPTION_KEY_SHA256, + getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED, + "true"); + + // Trying to append with different CPK headers + Configuration conf = fs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "different-1234567890123456789012"); + try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + AbfsClient abfsClient2 = fs2.getAbfsClient()) { + LambdaTestUtils.intercept(IOException.class, () -> { + abfsClient2.append(fileName, buffer, appendRequestParameters, null); + }); + } + + // Trying to append with no CPK headers + conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName); + try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem + .get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient()) { + LambdaTestUtils.intercept(IOException.class, () -> { + abfsClient3.append(fileName, buffer, appendRequestParameters, null); + }); + } + } + + @Test + public void testAppendWithoutCPK() throws Exception { + final AzureBlobFileSystem fs = getAbfs(false); + final String fileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, fileName, FILE_SIZE); + + // Trying to append without CPK headers + AppendRequestParameters appendRequestParameters = + new AppendRequestParameters( + 0, 0, 5, Mode.APPEND_MODE, false, null); + byte[] buffer = getRandomBytesArray(5); + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .append(fileName, buffer, appendRequestParameters, null); + assertCPKHeaders(abfsRestOperation, false); + assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256, + ""); + assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED, + "true"); + + // Trying to append with CPK headers + Configuration conf = fs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "12345678901234567890123456789012"); + try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + AbfsClient abfsClient2 = fs2.getAbfsClient()) { + LambdaTestUtils.intercept(IOException.class, () -> { + abfsClient2.append(fileName, buffer, appendRequestParameters, null); + }); + } + } + + @Test + public void testSetGetXAttr() throws Exception { + final AzureBlobFileSystem fs = getAbfs(true); + String fileName = methodName.getMethodName(); + createFileAndGetContent(fs, fileName, FILE_SIZE); + + String valSent = "testValue"; + String attrName = "testXAttr"; + + // set get and verify + fs.setXAttr(new Path(fileName), attrName, + valSent.getBytes(StandardCharsets.UTF_8), + EnumSet.of(XAttrSetFlag.CREATE)); + byte[] valBytes = fs.getXAttr(new Path(fileName), attrName); + String valRecieved = new String(valBytes); + assertEquals(valSent, valRecieved); + + // set new value get and verify + valSent = "new value"; + fs.setXAttr(new Path(fileName), attrName, + valSent.getBytes(StandardCharsets.UTF_8), + EnumSet.of(XAttrSetFlag.REPLACE)); + valBytes = fs.getXAttr(new Path(fileName), attrName); + valRecieved = new String(valBytes); + assertEquals(valSent, valRecieved); + + // Read without CPK header + LambdaTestUtils.intercept(IOException.class, () -> { + getAbfs(false).getXAttr(new Path(fileName), attrName); + }); + + // Wrong CPK + LambdaTestUtils.intercept(IOException.class, () -> { + getSameFSWithWrongCPK(fs).getXAttr(new Path(fileName), attrName); + }); + } + + @Test + public void testCopyBetweenAccounts() throws Exception { + String accountName = getRawConfiguration() + .get(FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT); + String accountKey = getRawConfiguration() + .get(FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT_KEY); + Assume.assumeTrue(accountName != null && !accountName.isEmpty()); + Assume.assumeTrue(accountKey != null && !accountKey.isEmpty()); + String fileSystemName = "cpkfs"; + + // Create fs1 and a file with CPK + AzureBlobFileSystem fs1 = getAbfs(true); + int fileSize = FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = 
createFileWithContent(fs1, "fs1-file.txt", fileContent); + + // Create fs2 with different CPK + Configuration conf = new Configuration(); + conf.addResource(TEST_CONFIGURATION_FILE_NAME); + conf.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true); + conf.unset(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_ABFS_ACCOUNT_NAME, accountName); + conf.set(FS_AZURE_ACCOUNT_KEY + "." + accountName, accountKey); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "123456789012345678901234567890ab"); + conf.set("fs.defaultFS", "abfs://" + fileSystemName + "@" + accountName); + AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + + // Read from fs1 and write to fs2, fs1 and fs2 are having different CPK + Path fs2DestFilePath = new Path("fs2-dest-file.txt"); + FSDataOutputStream ops = fs2.create(fs2DestFilePath); + try (FSDataInputStream iStream = fs1.open(testFilePath)) { + long totalBytesRead = 0; + do { + int length = 8 * ONE_MB; + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + totalBytesRead += bytesRead; + ops.write(buffer); + } while (totalBytesRead < fileContent.length); + ops.close(); + } + + // Trying to read fs2DestFilePath with different CPK headers + conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "different-1234567890123456789012"); + try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem + .get(conf); FSDataInputStream iStream = fs3.open(fs2DestFilePath)) { + int length = 8 * ONE_MB; + byte[] buffer = new byte[length]; + LambdaTestUtils.intercept(IOException.class, () -> { + iStream.read(buffer, 0, length); + }); + } + + // Trying to read fs2DestFilePath with no CPK headers + conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName); + try (AzureBlobFileSystem fs4 = (AzureBlobFileSystem) FileSystem + .get(conf); FSDataInputStream iStream = fs4.open(fs2DestFilePath)) { + int length = 8 * ONE_MB; + byte[] buffer = new byte[length]; + LambdaTestUtils.intercept(IOException.class, () -> { + iStream.read(buffer, 0, length); + }); + } + + // Read fs2DestFilePath and verify the content with the initial random + // bytes created and wrote into the source file at fs1 + try (FSDataInputStream iStream = fs2.open(fs2DestFilePath)) { + long totalBytesRead = 0; + int pos = 0; + do { + int length = 8 * ONE_MB; + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + totalBytesRead += bytesRead; + for (int i = 0; i < bytesRead; i++) { + assertEquals(fileContent[pos + i], buffer[i]); + } + pos = pos + bytesRead; + } while (totalBytesRead < fileContent.length); + } + } + + @Test + public void testListPathWithCPK() throws Exception { + testListPath(true); + } + + @Test + public void testListPathWithoutCPK() throws Exception { + testListPath(false); + } + + private void testListPath(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + String testDirName = "/" + methodName.getMethodName(); + final Path testPath = new Path(testDirName); + fs.mkdirs(testPath); + createFileAndGetContent(fs, testDirName + "/aaa", FILE_SIZE); + createFileAndGetContent(fs, testDirName + "/bbb", FILE_SIZE); + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .listPath(testDirName, false, INT_50, null); + assertListstatus(fs, abfsRestOperation, testPath); + + // Trying with different CPK headers + Configuration conf = fs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "different-1234567890123456789012"); + AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + AbfsClient abfsClient2 = fs2.getAbfsClient(); + abfsRestOperation = abfsClient2.listPath(testDirName, false, INT_50, null); + assertListstatus(fs, abfsRestOperation, testPath); + + if (isWithCPK) { + // Trying with no CPK headers + conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName); + AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem.get(conf); + AbfsClient abfsClient3 = fs3.getAbfsClient(); + abfsRestOperation = abfsClient3 + .listPath(testDirName, false, INT_50, null); + assertListstatus(fs, abfsRestOperation, testPath); + } + } + + private void assertListstatus(AzureBlobFileSystem fs, + AbfsRestOperation abfsRestOperation, Path testPath) throws IOException { + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + + FileStatus[] listStatuses = fs.listStatus(testPath); + Assertions.assertThat(listStatuses.length) + .describedAs("listStatuses should have 2 entries").isEqualTo(2); + + listStatuses = getSameFSWithWrongCPK(fs).listStatus(testPath); + Assertions.assertThat(listStatuses.length) + .describedAs("listStatuses should have 2 entries").isEqualTo(2); + } + + @Test + public void testCreatePathWithCPK() throws Exception { + testCreatePath(true); + } + + @Test + public void testCreatePathWithoutCPK() throws Exception { + testCreatePath(false); + } + + private void testCreatePath(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + + AbfsClient abfsClient = fs.getAbfsClient(); + FsPermission permission = new FsPermission(FsAction.EXECUTE, + FsAction.EXECUTE, FsAction.EXECUTE); + FsPermission umask = new FsPermission(FsAction.NONE, FsAction.NONE, + FsAction.NONE); + boolean isNamespaceEnabled = fs.getIsNamespaceEnabled(); + AbfsRestOperation abfsRestOperation = abfsClient + .createPath(testFileName, true, true, + isNamespaceEnabled ? getOctalNotation(permission) : null, + isNamespaceEnabled ? 
getOctalNotation(umask) : null, false, null); + assertCPKHeaders(abfsRestOperation, isWithCPK); + assertResponseHeader(abfsRestOperation, isWithCPK, + X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED, + "true"); + + FileStatus[] listStatuses = fs.listStatus(new Path(testFileName)); + Assertions.assertThat(listStatuses.length) + .describedAs("listStatuses should have 1 entry").isEqualTo(1); + + listStatuses = getSameFSWithWrongCPK(fs).listStatus(new Path(testFileName)); + Assertions.assertThat(listStatuses.length) + .describedAs("listStatuses should have 1 entry").isEqualTo(1); + } + + @Test + public void testRenamePathWithCPK() throws Exception { + testRenamePath(true); + } + + @Test + public void testRenamePathWithoutCPK() throws Exception { + testRenamePath(false); + } + + private void testRenamePath(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + + FileStatus fileStatusBeforeRename = fs + .getFileStatus(new Path(testFileName)); + + String newName = "/newName"; + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .renamePath(testFileName, newName, null); + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + + LambdaTestUtils.intercept(FileNotFoundException.class, + (() -> fs.getFileStatus(new Path(testFileName)))); + + FileStatus fileStatusAfterRename = fs.getFileStatus(new Path(newName)); + Assertions.assertThat(fileStatusAfterRename.getLen()) + .describedAs("File size has to be same before and after rename") + .isEqualTo(fileStatusBeforeRename.getLen()); + } + + @Test + public void testFlushWithCPK() throws Exception { + testFlush(true); + } + + @Test + public void testFlushWithoutCPK() throws Exception { + testFlush(false); + } + + private void testFlush(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + fs.create(new Path(testFileName)); + AbfsClient abfsClient = fs.getAbfsClient(); + String expectedCPKSha = getCPKSha(fs); + + byte[] fileContent = getRandomBytesArray(FILE_SIZE); + Path testFilePath = new Path(testFileName + "1"); + FSDataOutputStream oStream = fs.create(testFilePath); + oStream.write(fileContent); + + // Trying to read with different CPK headers + Configuration conf = fs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName, + "different-1234567890123456789012"); + try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); + AbfsClient abfsClient2 = fs2.getAbfsClient()) { + LambdaTestUtils.intercept(IOException.class, () -> { + abfsClient2.flush(testFileName, 0, false, false, null, null); + }); + } + + // Trying to read with no CPK headers + if (isWithCPK) { + conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName); + try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem + .get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient()) { + LambdaTestUtils.intercept(IOException.class, () -> { + abfsClient3.flush(testFileName, 0, false, false, null, null); + }); + } + } + + // With correct CPK + AbfsRestOperation abfsRestOperation = abfsClient + .flush(testFileName, 0, false, false, null, null); + assertCPKHeaders(abfsRestOperation, isWithCPK); + assertResponseHeader(abfsRestOperation, isWithCPK, + X_MS_ENCRYPTION_KEY_SHA256, expectedCPKSha); + assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED, + isWithCPK + ""); + } + + @Test + public void testSetPathPropertiesWithCPK() throws Exception { + testSetPathProperties(true); + } + + @Test + public void testSetPathPropertiesWithoutCPK() throws Exception { + testSetPathProperties(false); + } + + private void testSetPathProperties(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + + AbfsClient abfsClient = fs.getAbfsClient(); + final Hashtable properties = new Hashtable<>(); + properties.put("key", "val"); + AbfsRestOperation abfsRestOperation = abfsClient + .setPathProperties(testFileName, + convertXmsPropertiesToCommaSeparatedString(properties)); + assertCPKHeaders(abfsRestOperation, isWithCPK); + assertResponseHeader(abfsRestOperation, isWithCPK, + X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED, + "true"); + } + + @Test + public void testGetPathStatusFileWithCPK() throws Exception { + testGetPathStatusFile(true); + } + + @Test + public void testGetPathStatusFileWithoutCPK() throws Exception { + testGetPathStatusFile(false); + } + + private void testGetPathStatusFile(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .getPathStatus(testFileName, false); + assertCPKHeaders(abfsRestOperation, false); + assertResponseHeader(abfsRestOperation, isWithCPK, + X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED, + "true"); + assertResponseHeader(abfsRestOperation, false, + X_MS_REQUEST_SERVER_ENCRYPTED, ""); + + abfsRestOperation = abfsClient.getPathStatus(testFileName, true); + assertCPKHeaders(abfsRestOperation, isWithCPK); + assertResponseHeader(abfsRestOperation, isWithCPK, + X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs)); + assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED, + "true"); + assertResponseHeader(abfsRestOperation, false, + X_MS_REQUEST_SERVER_ENCRYPTED, ""); + } + + @Test + public void testDeletePathWithCPK() throws Exception { + testDeletePath(false); + } + + @Test + public void testDeletePathWithoutCPK() throws Exception { + testDeletePath(false); + } + + private void testDeletePath(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + createFileAndGetContent(fs, 
testFileName, FILE_SIZE); + + FileStatus[] listStatuses = fs.listStatus(new Path(testFileName)); + Assertions.assertThat(listStatuses.length) + .describedAs("listStatuses should have 1 entry").isEqualTo(1); + + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .deletePath(testFileName, false, null); + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + + Assertions.assertThatThrownBy(() -> fs.listStatus(new Path(testFileName))) + .isInstanceOf(FileNotFoundException.class); + } + + @Test + public void testSetPermissionWithCPK() throws Exception { + testSetPermission(true); + } + + @Test + public void testSetPermissionWithoutCPK() throws Exception { + testSetPermission(false); + } + + private void testSetPermission(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + Assume.assumeTrue(fs.getIsNamespaceEnabled()); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + AbfsClient abfsClient = fs.getAbfsClient(); + FsPermission permission = new FsPermission(FsAction.EXECUTE, + FsAction.EXECUTE, FsAction.EXECUTE); + AbfsRestOperation abfsRestOperation = abfsClient + .setPermission(testFileName, permission.toString()); + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + } + + @Test + public void testSetAclWithCPK() throws Exception { + testSetAcl(true); + } + + @Test + public void testSetAclWithoutCPK() throws Exception { + testSetAcl(false); + } + + private void testSetAcl(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + Assume.assumeTrue(fs.getIsNamespaceEnabled()); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + AbfsClient abfsClient = fs.getAbfsClient(); + + List aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL)); + final Map aclEntries = AbfsAclHelper + .deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); + + AbfsRestOperation abfsRestOperation = abfsClient + .setAcl(testFileName, AbfsAclHelper.serializeAclSpec(aclEntries)); + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + } + + @Test + public void testGetAclWithCPK() throws Exception { + testGetAcl(true); + } + + @Test + public void testGetAclWithoutCPK() throws Exception { + testGetAcl(false); + } + + private void testGetAcl(final boolean isWithCPK) throws Exception { + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + Assume.assumeTrue(fs.getIsNamespaceEnabled()); + createFileAndGetContent(fs, testFileName, FILE_SIZE); + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient.getAclStatus(testFileName); + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + } + + @Test + public void testCheckAccessWithCPK() throws Exception { + testCheckAccess(true); + } + + @Test + public void testCheckAccessWithoutCPK() throws Exception { + testCheckAccess(false); + } + + private void testCheckAccess(final boolean isWithCPK) throws Exception { + boolean isHNSEnabled = getConfiguration() + .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); + Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false", + isHNSEnabled); + 
Assume.assumeTrue("AuthType has to be OAuth", + getAuthType() == AuthType.OAuth); + + final AzureBlobFileSystem fs = getAbfs(isWithCPK); + final String testFileName = "/" + methodName.getMethodName(); + fs.create(new Path(testFileName)); + AbfsClient abfsClient = fs.getAbfsClient(); + AbfsRestOperation abfsRestOperation = abfsClient + .checkAccess(testFileName, "rwx"); + assertCPKHeaders(abfsRestOperation, false); + assertNoCPKResponseHeadersPresent(abfsRestOperation); + } + + private byte[] createFileAndGetContent(AzureBlobFileSystem fs, + String fileName, int fileSize) throws IOException { + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + ContractTestUtils.verifyFileContents(fs, testFilePath, fileContent); + return fileContent; + } + + private void assertCPKHeaders(AbfsRestOperation abfsRestOperation, + boolean isCPKHeaderExpected) { + assertHeader(abfsRestOperation, X_MS_ENCRYPTION_KEY, isCPKHeaderExpected); + assertHeader(abfsRestOperation, X_MS_ENCRYPTION_KEY_SHA256, + isCPKHeaderExpected); + assertHeader(abfsRestOperation, X_MS_ENCRYPTION_ALGORITHM, + isCPKHeaderExpected); + } + + private void assertNoCPKResponseHeadersPresent( + AbfsRestOperation abfsRestOperation) { + assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, false, + X_MS_REQUEST_SERVER_ENCRYPTED, ""); + assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256, + ""); + } + + private void assertResponseHeader(AbfsRestOperation abfsRestOperation, + boolean isHeaderExpected, String headerName, String expectedValue) { + final AbfsHttpOperation result = abfsRestOperation.getResult(); + final String value = result.getResponseHeader(headerName); + if (isHeaderExpected) { + Assertions.assertThat(value).isEqualTo(expectedValue); + } else { + Assertions.assertThat(value).isNull(); + } + } + + private void assertHeader(AbfsRestOperation abfsRestOperation, + String headerName, boolean isCPKHeaderExpected) { + assertTrue(abfsRestOperation != null); + Optional header = abfsRestOperation.getRequestHeaders() + .stream().filter(abfsHttpHeader -> abfsHttpHeader.getName() + .equalsIgnoreCase(headerName)).findFirst(); + String desc; + if (isCPKHeaderExpected) { + desc = + "CPK header " + headerName + " is expected, but the same is absent."; + } else { + desc = "CPK header " + headerName + + " is not expected, but the same is present."; + } + Assertions.assertThat(header.isPresent()).describedAs(desc) + .isEqualTo(isCPKHeaderExpected); + } + + private byte[] getSHA256Hash(String key) throws IOException { + try { + final MessageDigest digester = MessageDigest.getInstance("SHA-256"); + return digester.digest(key.getBytes(StandardCharsets.UTF_8)); + } catch (NoSuchAlgorithmException e) { + throw new IOException(e); + } + } + + private String getCPKSha(final AzureBlobFileSystem abfs) throws IOException { + Configuration conf = abfs.getConf(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + String encryptionKey = conf + .get(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName); + if (encryptionKey == null || encryptionKey.isEmpty()) { + return ""; + } + return getBase64EncodedString(getSHA256Hash(encryptionKey)); + } + + private String getBase64EncodedString(byte[] bytes) { + return java.util.Base64.getEncoder().encodeToString(bytes); + } + + private Path createFileWithContent(FileSystem fs, String fileName, + byte[] fileContent) throws IOException { + Path testFilePath = new Path(fileName); + try (FSDataOutputStream oStream = fs.create(testFilePath)) { + oStream.write(fileContent); + oStream.flush(); + } + return testFilePath; + } + + private String convertXmsPropertiesToCommaSeparatedString( + final Hashtable properties) + throws CharacterCodingException { + StringBuilder commaSeparatedProperties = new StringBuilder(); + final CharsetEncoder encoder = Charset.forName(XMS_PROPERTIES_ENCODING) + .newEncoder(); + for (Map.Entry propertyEntry : properties.entrySet()) { + String key = propertyEntry.getKey(); + String value = propertyEntry.getValue(); + Boolean canEncodeValue = encoder.canEncode(value); + if (!canEncodeValue) { + throw new CharacterCodingException(); + } + String encodedPropertyValue = Base64 + .encode(encoder.encode(CharBuffer.wrap(value)).array()); + commaSeparatedProperties.append(key).append(AbfsHttpConstants.EQUAL) + .append(encodedPropertyValue); + commaSeparatedProperties.append(AbfsHttpConstants.COMMA); + } + if (commaSeparatedProperties.length() != 0) { + commaSeparatedProperties + .deleteCharAt(commaSeparatedProperties.length() - 1); + } + return commaSeparatedProperties.toString(); + } + + private String getOctalNotation(FsPermission fsPermission) { + Preconditions.checkNotNull(fsPermission, "fsPermission"); + return String + .format(AbfsHttpConstants.PERMISSION_FORMAT, fsPermission.toOctal()); + } + + private byte[] getRandomBytesArray(int length) { + final byte[] b = new byte[length]; + new Random().nextBytes(b); + return b; + } + + private AzureBlobFileSystem getAbfs(boolean withCPK) throws IOException { + return getAbfs(withCPK, "12345678901234567890123456789012"); + } + + private AzureBlobFileSystem getAbfs(boolean withCPK, String cpk) + throws IOException { + Configuration conf = getRawConfiguration(); + if (withCPK) { + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + getAccountName(), + cpk); + } else { + conf.unset( + FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + getAccountName()); + } + return (AzureBlobFileSystem) FileSystem.newInstance(conf); + } + + private AzureBlobFileSystem getSameFSWithWrongCPK( + final AzureBlobFileSystem fs) throws IOException { + AbfsConfiguration abfsConf = fs.getAbfsStore().getAbfsConfiguration(); + Configuration conf = abfsConf.getRawConfiguration(); + String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME); + String cpk = conf + .get(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName); + if (cpk == null || cpk.isEmpty()) { + cpk = "01234567890123456789012345678912"; + } + cpk = "different-" + cpk; + String differentCpk = cpk.substring(0, ENCRYPTION_KEY_LEN - 1); + conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." 
+ accountName, + differentCpk); + conf.set("fs.defaultFS", + "abfs://" + getFileSystemName() + "@" + accountName); + AzureBlobFileSystem sameFSWithDifferentCPK = + (AzureBlobFileSystem) FileSystem.newInstance(conf); + return sameFSWithDifferentCPK; + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java index 72ea7661b5a90..565eb38c4f70a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java @@ -28,6 +28,7 @@ public final class TestConfigurationKeys { public static final String FS_AZURE_CONTRACT_TEST_URI = "fs.contract.test.fs.abfs"; public static final String FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT = "fs.azure.test.namespace.enabled"; public static final String FS_AZURE_TEST_APPENDBLOB_ENABLED = "fs.azure.test.appendblob.enabled"; + public static final String FS_AZURE_TEST_CPK_ENABLED = "fs.azure.test.cpk.enabled"; public static final String FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_ID = "fs.azure.account.oauth2.contributor.client.id"; public static final String FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET = "fs.azure.account.oauth2.contributor.client.secret"; @@ -54,6 +55,9 @@ public final class TestConfigurationKeys { public static final String FS_AZURE_TEST_APP_SECRET = "fs.azure.test.app.secret"; + public static final String FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT = "fs.azure.test.cpk-enabled-secondary-account"; + public static final String FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT_KEY = "fs.azure.test.cpk-enabled-secondary-account.key"; + public static final String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml"; public static final String TEST_CONTAINER_PREFIX = "abfs-testcontainer-"; public static final int TEST_TIMEOUT = 15 * 60 * 1000; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java index 4facc10aeff0b..a725bf3175a5c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.lang.reflect.Field; -import java.net.MalformedURLException; import java.net.URL; import java.util.List; import java.util.regex.Pattern; @@ -103,7 +102,7 @@ public TestAbfsClient(){ } private String getUserAgentString(AbfsConfiguration config, - boolean includeSSLProvider) throws MalformedURLException { + boolean includeSSLProvider) throws IOException { AbfsClientContext abfsClientContext = new AbfsClientContextBuilder().build(); AbfsClient client = new AbfsClient(new URL("https://azure.com"), null, config, (AccessTokenProvider) null, abfsClientContext); @@ -250,8 +249,7 @@ public void verifyUserAgentClusterType() throws Exception { public static AbfsClient createTestClientFromCurrentContext( AbfsClient baseAbfsClientInstance, - AbfsConfiguration abfsConfig) - throws AzureBlobFileSystemException { + AbfsConfiguration abfsConfig) throws IOException { AuthType currentAuthType = abfsConfig.getAuthType( abfsConfig.getAccountName()); From 1b6383ff45d2ef3c2f0f206ec87b9415cb48b8ee Mon Sep 
17 00:00:00 2001 From: Peter Bacsko Date: Tue, 27 Apr 2021 14:01:53 +0200 Subject: [PATCH 279/666] YARN-10739. GenericEventHandler.printEventQueueDetails causes RM recovery to take too much time. Contributed by Qi Zhu. --- .../hadoop/yarn/event/AsyncDispatcher.java | 18 +++++++++++++++++- .../hadoop/yarn/event/TestAsyncDispatcher.java | 2 -- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index 667515d00c104..ba6bb435ec25b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -25,7 +25,11 @@ import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.yarn.metrics.EventTypeMetrics; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.MonotonicClock; @@ -93,6 +97,8 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher { private Clock clock = new MonotonicClock(); + private ThreadPoolExecutor printEventDetailsExecutor; + /** * The thread name for dispatcher. */ @@ -179,6 +185,15 @@ protected void serviceInit(Configuration conf) throws Exception{ YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD, YarnConfiguration. DEFAULT_YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD); + + ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat("PrintEventDetailsThread #%d") + .build(); + // Thread pool for async print event details, + // to prevent wasting too much time for RM. 
+ printEventDetailsExecutor = new ThreadPoolExecutor( + 1, 5, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(), threadFactory); } @Override @@ -222,6 +237,7 @@ protected void serviceStop() throws Exception { LOG.warn("Interrupted Exception while stopping", ie); } } + printEventDetailsExecutor.shutdownNow(); // stop all the components super.serviceStop(); @@ -319,7 +335,7 @@ public void handle(Event event) { if (qSize != 0 && qSize % detailsInterval == 0 && lastEventDetailsQueueSizeLogged != qSize) { lastEventDetailsQueueSizeLogged = qSize; - printEventQueueDetails(); + printEventDetailsExecutor.submit(this::printEventQueueDetails); printTrigger = true; } int remCapacity = eventQueue.remainingCapacity(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java index 7d2572a4c118f..8b2dfa08b0dd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java @@ -187,8 +187,6 @@ public void testPrintDispatcherEventDetails() throws Exception { when(event.getType()).thenReturn(TestEnum.TestEventType); dispatcher.getEventHandler().handle(event); } - verify(log, atLeastOnce()).info("Event type: TestEventType, " + - "Event record counter: 5000"); Thread.sleep(2000); //Make sure more than one event to take verify(log, atLeastOnce()). From 48a70fcdcbdccc922aadf0b2bc1caf2b632970fd Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Wed, 28 Apr 2021 10:03:39 +0900 Subject: [PATCH 280/666] HADOOP-17674. Use spotbugs-maven-plugin in hadoop-huaweicloud. (#2962) Reviewed-by: Wei-Chiu Chuang --- hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml index 963fa37426cc6..43360c11cd9d2 100755 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml @@ -60,10 +60,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml From 2016dfbb7faa23f8ccd5f308db58d6d4805d8bad Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Tue, 27 Apr 2021 20:44:13 -0700 Subject: [PATCH 281/666] YARN-10747. 
Bump YARN CSI protobuf version to 3.7.1 (#2946) Signed-off-by: Akira Ajisaka Change-Id: Ib2a4bf775a5804641a91c6f9a486772c9d87c17e --- .../hadoop-yarn/hadoop-yarn-csi/pom.xml | 40 +++++++++++++++++-- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml index 22565c14bcc43..62707eadb9d22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml @@ -26,7 +26,6 @@ jar - 3.6.1 1.26.0 1.5.0.Final @@ -39,7 +38,7 @@ com.google.protobuf protobuf-java - ${protobuf.version} + ${hadoop.protobuf.version} io.netty @@ -54,6 +53,13 @@ io.grpc grpc-protobuf ${grpc.version} + + + + com.google.protobuf + protobuf-java + + io.grpc @@ -91,21 +97,49 @@ hadoop-common test-jar test + + + + com.google.protobuf + protobuf-java + + org.apache.hadoop hadoop-common provided + + + + com.google.protobuf + protobuf-java + + org.apache.hadoop hadoop-yarn-common provided + + + + com.google.protobuf + protobuf-java + + org.apache.hadoop hadoop-yarn-api provided + + + + com.google.protobuf + protobuf-java + + javax.annotation @@ -158,7 +192,7 @@ protobuf-maven-plugin ${protobuf-maven-plugin.version} - com.google.protobuf:protoc:3.6.1:exe:${os.detected.classifier} + com.google.protobuf:protoc:${hadoop.protobuf.version}:exe:${os.detected.classifier} grpc-java io.grpc:protoc-gen-grpc-java:1.26.0:exe:${os.detected.classifier} From 3629361a045e13cb2268a80ca94e5f5ad56abc2b Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 28 Apr 2021 23:43:11 -0700 Subject: [PATCH 282/666] HADOOP-11245. Update NFS gateway to use Netty4 (#2832) Reviewed-by: Tsz-Wo Nicholas Sze --- hadoop-common-project/hadoop-nfs/pom.xml | 2 +- .../org/apache/hadoop/mount/MountdBase.java | 14 +- .../org/apache/hadoop/nfs/nfs3/Nfs3Base.java | 7 +- .../hadoop/oncrpc/RegistrationClient.java | 13 +- .../org/apache/hadoop/oncrpc/RpcInfo.java | 12 +- .../org/apache/hadoop/oncrpc/RpcProgram.java | 19 ++- .../org/apache/hadoop/oncrpc/RpcResponse.java | 23 ++-- .../org/apache/hadoop/oncrpc/RpcUtil.java | 123 +++++++++-------- .../apache/hadoop/oncrpc/SimpleTcpClient.java | 78 ++++++----- .../hadoop/oncrpc/SimpleTcpClientHandler.java | 30 ++--- .../apache/hadoop/oncrpc/SimpleTcpServer.java | 76 ++++++----- .../apache/hadoop/oncrpc/SimpleUdpServer.java | 65 +++++---- .../java/org/apache/hadoop/oncrpc/XDR.java | 12 +- .../org/apache/hadoop/portmap/Portmap.java | 126 +++++++++++------- .../hadoop/portmap/RpcProgramPortmap.java | 46 ++++--- .../hadoop/oncrpc/TestFrameDecoder.java | 100 +++++++------- .../apache/hadoop/portmap/TestPortmap.java | 2 +- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 2 +- .../hdfs/nfs/mount/RpcProgramMountd.java | 12 +- .../hadoop/hdfs/nfs/nfs3/Nfs3Utils.java | 12 +- .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 2 +- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 14 +- .../apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java | 2 +- .../hadoop/hdfs/nfs/nfs3/WriteManager.java | 2 +- .../hadoop/hdfs/nfs/TestOutOfOrderWrite.java | 32 ++--- .../hdfs/nfs/nfs3/TestRpcProgramNfs3.java | 2 +- .../hadoop/hdfs/nfs/nfs3/TestWrites.java | 2 +- 27 files changed, 472 insertions(+), 358 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index f5de8407a0920..33d8b3710c528 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -90,7 +90,7 @@ io.netty - netty + netty-all 
compile diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java index 0ff3084bf3eb9..58d3e51f2bdfb 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java @@ -41,6 +41,8 @@ abstract public class MountdBase { private final RpcProgram rpcProgram; private int udpBoundPort; // Will set after server starts private int tcpBoundPort; // Will set after server starts + private SimpleUdpServer udpServer = null; + private SimpleTcpServer tcpServer = null; public RpcProgram getRpcProgram() { return rpcProgram; @@ -57,7 +59,7 @@ public MountdBase(RpcProgram program) throws IOException { /* Start UDP server */ private void startUDPServer() { - SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(), + udpServer = new SimpleUdpServer(rpcProgram.getPort(), rpcProgram, 1); rpcProgram.startDaemons(); try { @@ -76,7 +78,7 @@ private void startUDPServer() { /* Start TCP server */ private void startTCPServer() { - SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(), + tcpServer = new SimpleTcpServer(rpcProgram.getPort(), rpcProgram, 1); rpcProgram.startDaemons(); try { @@ -118,6 +120,14 @@ public void stop() { rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort); tcpBoundPort = 0; } + if (udpServer != null) { + udpServer.shutdown(); + udpServer = null; + } + if (tcpServer != null) { + tcpServer.shutdown(); + tcpServer = null; + } } /** diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java index ff83a5f19bee1..e6ea29b42bff4 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java @@ -35,6 +35,7 @@ public abstract class Nfs3Base { public static final Logger LOG = LoggerFactory.getLogger(Nfs3Base.class); private final RpcProgram rpcProgram; private int nfsBoundPort; // Will set after server starts + private SimpleTcpServer tcpServer = null; public RpcProgram getRpcProgram() { return rpcProgram; @@ -61,7 +62,7 @@ public void start(boolean register) { } private void startTCPServer() { - SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(), + tcpServer = new SimpleTcpServer(rpcProgram.getPort(), rpcProgram, 0); rpcProgram.startDaemons(); try { @@ -84,6 +85,10 @@ public void stop() { nfsBoundPort = 0; } rpcProgram.stopDaemons(); + if (tcpServer != null) { + tcpServer.shutdown(); + tcpServer = null; + } } /** * Priority of the nfsd shutdown hook. 
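The per-file hunks that follow repeat one migration pattern: Netty 3's SimpleChannelUpstreamHandler (or SimpleChannelHandler) with messageReceived(ChannelHandlerContext, MessageEvent) becomes Netty 4's ChannelInboundHandlerAdapter with channelRead(ChannelHandlerContext, Object), and ChannelBuffer payloads become reference-counted ByteBuf instances that the receiving handler must release. As a reading aid only, not code from this patch, a minimal sketch of that target shape (the class name FrameLengthEchoHandler is made up) could look like:

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Illustrative sketch of the Netty 4 inbound-handler shape used below;
// it is not part of HADOOP-11245.
public class FrameLengthEchoHandler extends ChannelInboundHandlerAdapter {
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) {
    // Netty 4 hands the decoded payload directly instead of a MessageEvent.
    ByteBuf buf = (ByteBuf) msg;
    try {
      int readable = buf.readableBytes();
      // Reply with the observed frame length; writeAndFlush replaces
      // Netty 3's channel.write(), which flushed implicitly.
      ctx.writeAndFlush(ctx.alloc().buffer(4).writeInt(readable));
    } finally {
      // ByteBuf is reference-counted in Netty 4 and must be released
      // by the last handler that consumes it.
      buf.release();
    }
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    // Replaces Netty 3's exceptionCaught(ChannelHandlerContext, ExceptionEvent).
    ctx.close();
  }
}

The same renames (channelConnected to channelActive, Channels.fireMessageReceived to ctx.fireChannelRead) recur throughout the handler changes below.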
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java index c8528ba4d558f..c96f1d53bb4c5 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java @@ -19,10 +19,9 @@ import java.util.Arrays; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.MessageEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,10 +57,10 @@ private boolean validMessageLength(int len) { } @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { - ChannelBuffer buf = (ChannelBuffer) e.getMessage(); // Read reply + public void channelRead(ChannelHandlerContext ctx, Object msg) { + ByteBuf buf = (ByteBuf) msg; // Read reply if (!validMessageLength(buf.readableBytes())) { - e.getChannel().close(); + ctx.channel().close(); return; } @@ -83,7 +82,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { RpcDeniedReply deniedReply = (RpcDeniedReply) reply; handle(deniedReply); } - e.getChannel().close(); // shutdown now that request is complete + ctx.channel().close(); // shutdown now that request is complete } private void handle(RpcDeniedReply deniedReply) { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java index b434d79285c6f..aba8e9ea2624e 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java @@ -19,9 +19,9 @@ import java.net.SocketAddress; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; /** * RpcInfo records all contextual information of an RPC message. 
It contains @@ -29,11 +29,11 @@ */ public final class RpcInfo { private final RpcMessage header; - private final ChannelBuffer data; + private final ByteBuf data; private final Channel channel; private final SocketAddress remoteAddress; - public RpcInfo(RpcMessage header, ChannelBuffer data, + public RpcInfo(RpcMessage header, ByteBuf data, ChannelHandlerContext channelContext, Channel channel, SocketAddress remoteAddress) { this.header = header; @@ -46,7 +46,7 @@ public RpcMessage header() { return header; } - public ChannelBuffer data() { + public ByteBuf data() { return data; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java index d4b2261e05f22..be927b37efbc3 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java @@ -22,17 +22,16 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; import org.apache.hadoop.oncrpc.security.Verifier; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.portmap.PortmapRequest; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.channel.SimpleChannelUpstreamHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +39,7 @@ * Class for writing RPC server programs based on RFC 1050. Extend this class * and implement {@link #handleInternal} to handle the requests received. 
*/ -public abstract class RpcProgram extends SimpleChannelUpstreamHandler { +public abstract class RpcProgram extends ChannelInboundHandlerAdapter { static final Logger LOG = LoggerFactory.getLogger(RpcProgram.class); public static final int RPCB_PORT = 111; private final String program; @@ -162,9 +161,9 @@ public void startDaemons() {} public void stopDaemons() {} @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - RpcInfo info = (RpcInfo) e.getMessage(); + RpcInfo info = (RpcInfo) msg; RpcCall call = (RpcCall) info.header(); SocketAddress remoteAddress = info.remoteAddress(); @@ -222,7 +221,7 @@ private void sendAcceptedReply(RpcCall call, SocketAddress remoteAddress, out.writeInt(lowProgVersion); out.writeInt(highProgVersion); } - ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap() + ByteBuf b = Unpooled.wrappedBuffer(out.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(b, remoteAddress); RpcUtil.sendRpcResponse(ctx, rsp); @@ -235,7 +234,7 @@ protected static void sendRejectedReply(RpcCall call, RpcReply.ReplyState.MSG_DENIED, RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone()); reply.write(out); - ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap() + ByteBuf buf = Unpooled.wrappedBuffer(out.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(buf, remoteAddress); RpcUtil.sendRpcResponse(ctx, rsp); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java index 2e45e6100b108..0d6431f68bd5a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java @@ -19,27 +19,30 @@ import java.net.SocketAddress; -import org.jboss.netty.buffer.ChannelBuffer; +import io.netty.buffer.ByteBuf; +import io.netty.channel.DefaultAddressedEnvelope; /** * RpcResponse encapsulates a response to a RPC request. It contains the data * that is going to cross the wire, as well as the information of the remote * peer. 
*/ -public class RpcResponse { - private final ChannelBuffer data; - private final SocketAddress remoteAddress; +public class RpcResponse extends + DefaultAddressedEnvelope { + public RpcResponse(ByteBuf message, SocketAddress recipient) { + super(message, recipient, null); + } - public RpcResponse(ChannelBuffer data, SocketAddress remoteAddress) { - this.data = data; - this.remoteAddress = remoteAddress; + public RpcResponse(ByteBuf message, SocketAddress recipient, + SocketAddress sender) { + super(message, recipient, sender); } - public ChannelBuffer data() { - return data; + public ByteBuf data() { + return this.content(); } public SocketAddress remoteAddress() { - return remoteAddress; + return this.recipient(); } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java index cebebd27d0c4b..e8bc27d687fea 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java @@ -17,16 +17,18 @@ */ package org.apache.hadoop.oncrpc; +import java.net.SocketAddress; import java.nio.ByteBuffer; - -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.channel.SimpleChannelUpstreamHandler; -import org.jboss.netty.handler.codec.frame.FrameDecoder; +import java.util.List; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.socket.DatagramPacket; +import io.netty.handler.codec.ByteToMessageDecoder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,16 +45,16 @@ public static int getNewXid(String caller) { public static void sendRpcResponse(ChannelHandlerContext ctx, RpcResponse response) { - Channels.fireMessageReceived(ctx, response); + ctx.fireChannelRead(response); } - public static FrameDecoder constructRpcFrameDecoder() { + public static ByteToMessageDecoder constructRpcFrameDecoder() { return new RpcFrameDecoder(); } - public static final SimpleChannelUpstreamHandler STAGE_RPC_MESSAGE_PARSER = new RpcMessageParserStage(); - public static final SimpleChannelUpstreamHandler STAGE_RPC_TCP_RESPONSE = new RpcTcpResponseStage(); - public static final SimpleChannelUpstreamHandler STAGE_RPC_UDP_RESPONSE = new RpcUdpResponseStage(); + public static final ChannelInboundHandlerAdapter STAGE_RPC_MESSAGE_PARSER = new RpcMessageParserStage(); + public static final ChannelInboundHandlerAdapter STAGE_RPC_TCP_RESPONSE = new RpcTcpResponseStage(); + public static final ChannelInboundHandlerAdapter STAGE_RPC_UDP_RESPONSE = new RpcUdpResponseStage(); /** * An RPC client can separate a RPC message into several frames (i.e., @@ -62,44 +64,39 @@ public static FrameDecoder constructRpcFrameDecoder() { * RpcFrameDecoder is a stateful pipeline stage. It has to be constructed for * each RPC client. 
*/ - static class RpcFrameDecoder extends FrameDecoder { + static class RpcFrameDecoder extends ByteToMessageDecoder { public static final Logger LOG = LoggerFactory.getLogger(RpcFrameDecoder.class); - private ChannelBuffer currentFrame; + private volatile boolean isLast; @Override - protected Object decode(ChannelHandlerContext ctx, Channel channel, - ChannelBuffer buf) { + protected void decode(ChannelHandlerContext ctx, ByteBuf buf, + List out) { - if (buf.readableBytes() < 4) - return null; + if (buf.readableBytes() < 4) { + return; + } buf.markReaderIndex(); byte[] fragmentHeader = new byte[4]; buf.readBytes(fragmentHeader); int length = XDR.fragmentSize(fragmentHeader); - boolean isLast = XDR.isLastFragment(fragmentHeader); + isLast = XDR.isLastFragment(fragmentHeader); if (buf.readableBytes() < length) { buf.resetReaderIndex(); - return null; + return; } - ChannelBuffer newFragment = buf.readSlice(length); - if (currentFrame == null) { - currentFrame = newFragment; - } else { - currentFrame = ChannelBuffers.wrappedBuffer(currentFrame, newFragment); - } + ByteBuf newFragment = buf.readSlice(length); + newFragment.retain(); + out.add(newFragment); + } - if (isLast) { - ChannelBuffer completeFrame = currentFrame; - currentFrame = null; - return completeFrame; - } else { - return null; - } + @VisibleForTesting + public boolean isLast() { + return isLast; } } @@ -107,30 +104,44 @@ protected Object decode(ChannelHandlerContext ctx, Channel channel, * RpcMessageParserStage parses the network bytes and encapsulates the RPC * request into a RpcInfo instance. */ - static final class RpcMessageParserStage extends SimpleChannelUpstreamHandler { + @ChannelHandler.Sharable + static final class RpcMessageParserStage extends ChannelInboundHandlerAdapter { private static final Logger LOG = LoggerFactory .getLogger(RpcMessageParserStage.class); @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - ChannelBuffer buf = (ChannelBuffer) e.getMessage(); - ByteBuffer b = buf.toByteBuffer().asReadOnlyBuffer(); + ByteBuf buf; + SocketAddress remoteAddress; + if (msg instanceof DatagramPacket) { + DatagramPacket packet = (DatagramPacket)msg; + buf = packet.content(); + remoteAddress = packet.sender(); + } else { + buf = (ByteBuf) msg; + remoteAddress = ctx.channel().remoteAddress(); + } + + ByteBuffer b = buf.nioBuffer().asReadOnlyBuffer(); XDR in = new XDR(b, XDR.State.READING); RpcInfo info = null; try { RpcCall callHeader = RpcCall.read(in); - ChannelBuffer dataBuffer = ChannelBuffers.wrappedBuffer(in.buffer() + ByteBuf dataBuffer = Unpooled.wrappedBuffer(in.buffer() .slice()); - info = new RpcInfo(callHeader, dataBuffer, ctx, e.getChannel(), - e.getRemoteAddress()); + + info = new RpcInfo(callHeader, dataBuffer, ctx, ctx.channel(), + remoteAddress); } catch (Exception exc) { - LOG.info("Malformed RPC request from " + e.getRemoteAddress()); + LOG.info("Malformed RPC request from " + remoteAddress); + } finally { + buf.release(); } if (info != null) { - Channels.fireMessageReceived(ctx, info); + ctx.fireChannelRead(info); } } } @@ -139,16 +150,17 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) * RpcTcpResponseStage sends an RpcResponse across the wire with the * appropriate fragment header. 
*/ - private static class RpcTcpResponseStage extends SimpleChannelUpstreamHandler { + @ChannelHandler.Sharable + private static class RpcTcpResponseStage extends ChannelInboundHandlerAdapter { @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - RpcResponse r = (RpcResponse) e.getMessage(); + RpcResponse r = (RpcResponse) msg; byte[] fragmentHeader = XDR.recordMark(r.data().readableBytes(), true); - ChannelBuffer header = ChannelBuffers.wrappedBuffer(fragmentHeader); - ChannelBuffer d = ChannelBuffers.wrappedBuffer(header, r.data()); - e.getChannel().write(d); + ByteBuf header = Unpooled.wrappedBuffer(fragmentHeader); + ByteBuf d = Unpooled.wrappedBuffer(header, r.data()); + ctx.channel().writeAndFlush(d); } } @@ -156,14 +168,17 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) * RpcUdpResponseStage sends an RpcResponse as a UDP packet, which does not * require a fragment header. */ + @ChannelHandler.Sharable private static final class RpcUdpResponseStage extends - SimpleChannelUpstreamHandler { + ChannelInboundHandlerAdapter { @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - RpcResponse r = (RpcResponse) e.getMessage(); - e.getChannel().write(r.data(), r.remoteAddress()); + RpcResponse r = (RpcResponse) msg; + // TODO: check out https://github.com/netty/netty/issues/1282 for + // correct usage + ctx.channel().writeAndFlush(r.data()); } } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClient.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClient.java index 32e1b4b839218..7cfef6439b059 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClient.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClient.java @@ -18,15 +18,16 @@ package org.apache.hadoop.oncrpc; import java.net.InetSocketAddress; -import java.util.concurrent.Executors; -import org.jboss.netty.bootstrap.ClientBootstrap; -import org.jboss.netty.channel.ChannelFactory; -import org.jboss.netty.channel.ChannelFuture; -import org.jboss.netty.channel.ChannelPipeline; -import org.jboss.netty.channel.ChannelPipelineFactory; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A simple TCP based RPC client which just sends a request to a server. 
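The bootstrap changes in this and the following files (SimpleTcpClient, SimpleTcpServer, SimpleUdpServer, Portmap) all move from Netty 3's ChannelFactory plus ClientBootstrap/ServerBootstrap-with-pipeline-factory model to Netty 4's Bootstrap or ServerBootstrap configured with EventLoopGroups and a ChannelInitializer, with releaseExternalResources() replaced by shutdownGracefully(). As a standalone reference, not code from this patch, a minimal Netty 4 TCP client wired that way (host, port and the empty initializer are placeholders) might look like:

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

// Illustrative sketch only; mirrors the Bootstrap/EventLoopGroup wiring the
// hunks below introduce, it is not part of HADOOP-11245.
public final class MinimalTcpClient {
  public static void main(String[] args) throws InterruptedException {
    NioEventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
      Bootstrap bootstrap = new Bootstrap()
          .group(workerGroup)
          .channel(NioSocketChannel.class)
          .option(ChannelOption.TCP_NODELAY, true)
          .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
              // Frame decoder and application handlers would be added to
              // ch.pipeline() here, as SimpleTcpClient does below.
            }
          });
      // connect() returns a ChannelFuture; sync() waits for the connection.
      ChannelFuture f = bootstrap.connect("127.0.0.1", 8080).sync();
      // Block until the channel is closed, then release the event loop threads.
      f.channel().closeFuture().sync();
    } finally {
      // Replaces Netty 3's bootstrap.releaseExternalResources().
      workerGroup.shutdownGracefully();
    }
  }
}

On the server side the same idea splits into a boss group that accepts connections and a worker group that services them, which is what the SimpleTcpServer and Portmap hunks below set up.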
@@ -35,8 +36,9 @@ public class SimpleTcpClient { protected final String host; protected final int port; protected final XDR request; - protected ChannelPipelineFactory pipelineFactory; protected final boolean oneShot; + private NioEventLoopGroup workerGroup; + private ChannelFuture future; public SimpleTcpClient(String host, int port, XDR request) { this(host,port, request, true); @@ -48,40 +50,54 @@ public SimpleTcpClient(String host, int port, XDR request, Boolean oneShot) { this.request = request; this.oneShot = oneShot; } - - protected ChannelPipelineFactory setPipelineFactory() { - this.pipelineFactory = new ChannelPipelineFactory() { + + protected ChannelInitializer setChannelHandler() { + return new ChannelInitializer() { @Override - public ChannelPipeline getPipeline() { - return Channels.pipeline( + protected void initChannel(SocketChannel ch) throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast( RpcUtil.constructRpcFrameDecoder(), - new SimpleTcpClientHandler(request)); + new SimpleTcpClientHandler(request) + ); } }; - return this.pipelineFactory; } + @VisibleForTesting public void run() { // Configure the client. - ChannelFactory factory = new NioClientSocketChannelFactory( - Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), 1, 1); - ClientBootstrap bootstrap = new ClientBootstrap(factory); - - // Set up the pipeline factory. - bootstrap.setPipelineFactory(setPipelineFactory()); - - bootstrap.setOption("tcpNoDelay", true); - bootstrap.setOption("keepAlive", true); + workerGroup = new NioEventLoopGroup(); + Bootstrap bootstrap = new Bootstrap() + .group(workerGroup) + .channel(NioSocketChannel.class); - // Start the connection attempt. - ChannelFuture future = bootstrap.connect(new InetSocketAddress(host, port)); + try { + future = bootstrap.handler(setChannelHandler()) + .option(ChannelOption.TCP_NODELAY, true) + .option(ChannelOption.SO_KEEPALIVE, true) + .connect(new InetSocketAddress(host, port)).sync(); + } catch (InterruptedException e) { + e.printStackTrace(); + } finally { + if (oneShot) { + stop(); + } + } + } - if (oneShot) { - // Wait until the connection is closed or the connection attempt fails. - future.getChannel().getCloseFuture().awaitUninterruptibly(); + public void stop() { + try { + if (future != null) { + // Wait until the connection is closed or the connection attempt fails. + future.channel().closeFuture().sync(); + } + } catch (InterruptedException e) { + e.printStackTrace(); + } finally { // Shut down thread pools to exit. 
- bootstrap.releaseExternalResources(); + workerGroup.shutdownGracefully(); } } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java index 23b6682361c9b..1acefc857f830 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java @@ -17,19 +17,19 @@ */ package org.apache.hadoop.oncrpc; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.ChannelStateEvent; -import org.jboss.netty.channel.ExceptionEvent; -import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.channel.SimpleChannelHandler; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.util.ReferenceCountUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * A simple TCP based RPC client handler used by {@link SimpleTcpServer}. */ -public class SimpleTcpClientHandler extends SimpleChannelHandler { +public class SimpleTcpClientHandler extends ChannelInboundHandlerAdapter { public static final Logger LOG = LoggerFactory.getLogger(SimpleTcpClient.class); protected final XDR request; @@ -39,13 +39,13 @@ public SimpleTcpClientHandler(XDR request) { } @Override - public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) { + public void channelActive(ChannelHandlerContext ctx) throws Exception { // Send the request if (LOG.isDebugEnabled()) { LOG.debug("sending PRC request"); } - ChannelBuffer outBuf = XDR.writeMessageTcp(request, true); - e.getChannel().write(outBuf); + ByteBuf outBuf = XDR.writeMessageTcp(request, true); + ctx.channel().writeAndFlush(outBuf); } /** @@ -53,13 +53,13 @@ public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) { * more interaction with the server. 
*/ @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { - e.getChannel().close(); + public void channelRead(ChannelHandlerContext ctx, Object msg) { + ctx.channel().close(); } @Override - public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { - LOG.warn("Unexpected exception from downstream: ", e.getCause()); - e.getChannel().close(); + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + LOG.warn("Unexpected exception from downstream: ", cause.getCause()); + ctx.channel().close(); } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java index 177fa3d80b1b4..29155c80b1846 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java @@ -20,14 +20,17 @@ import java.net.InetSocketAddress; import java.util.concurrent.Executors; -import org.jboss.netty.bootstrap.ServerBootstrap; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelFactory; -import org.jboss.netty.channel.ChannelPipeline; -import org.jboss.netty.channel.ChannelPipelineFactory; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.SimpleChannelUpstreamHandler; -import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,9 +42,11 @@ public class SimpleTcpServer { LoggerFactory.getLogger(SimpleTcpServer.class); protected final int port; protected int boundPort = -1; // Will be set after server starts - protected final SimpleChannelUpstreamHandler rpcProgram; + protected final ChannelInboundHandlerAdapter rpcProgram; private ServerBootstrap server; private Channel ch; + private EventLoopGroup bossGroup; + private EventLoopGroup workerGroup; /** The maximum number of I/O worker threads */ protected final int workerCount; @@ -57,37 +62,32 @@ public SimpleTcpServer(int port, RpcProgram program, int workercount) { this.workerCount = workercount; } - public void run() { + public void run() throws InterruptedException { // Configure the Server. 
- ChannelFactory factory; - if (workerCount == 0) { - // Use default workers: 2 * the number of available processors - factory = new NioServerSocketChannelFactory( - Executors.newCachedThreadPool(), Executors.newCachedThreadPool()); - } else { - factory = new NioServerSocketChannelFactory( - Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), - workerCount); - } + bossGroup = new NioEventLoopGroup(); + workerGroup = new NioEventLoopGroup(workerCount, Executors.newCachedThreadPool()); - server = new ServerBootstrap(factory); - server.setPipelineFactory(new ChannelPipelineFactory() { + server = new ServerBootstrap(); + server.group(bossGroup, workerGroup) + .channel(NioServerSocketChannel.class) + .childHandler(new ChannelInitializer() { @Override - public ChannelPipeline getPipeline() throws Exception { - return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(), + protected void initChannel(SocketChannel ch) throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast(RpcUtil.constructRpcFrameDecoder(), RpcUtil.STAGE_RPC_MESSAGE_PARSER, rpcProgram, RpcUtil.STAGE_RPC_TCP_RESPONSE); - } - }); - server.setOption("child.tcpNoDelay", true); - server.setOption("child.keepAlive", true); - server.setOption("child.reuseAddress", true); - server.setOption("reuseAddress", true); + }}) + .childOption(ChannelOption.TCP_NODELAY, true) + .childOption(ChannelOption.SO_KEEPALIVE, true) + .childOption(ChannelOption.SO_REUSEADDR, true) + .option(ChannelOption.SO_REUSEADDR, true); // Listen to TCP port - ch = server.bind(new InetSocketAddress(port)); - InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress(); + ChannelFuture f = server.bind(new InetSocketAddress(port)).sync(); + ch = f.channel(); + InetSocketAddress socketAddr = (InetSocketAddress) ch.localAddress(); boundPort = socketAddr.getPort(); LOG.info("Started listening to TCP requests at port " + boundPort + " for " @@ -102,9 +102,17 @@ public int getBoundPort() { public void shutdown() { if (ch != null) { ch.close().awaitUninterruptibly(); + ch = null; + } + + if (workerGroup != null) { + workerGroup.shutdownGracefully(); + workerGroup = null; } - if (server != null) { - server.releaseExternalResources(); + + if (bossGroup != null) { + bossGroup.shutdownGracefully(); + bossGroup = null; } } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java index e65003ca64beb..516503c323a08 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java @@ -20,12 +20,16 @@ import java.net.InetSocketAddress; import java.util.concurrent.Executors; -import org.jboss.netty.bootstrap.ConnectionlessBootstrap; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.SimpleChannelUpstreamHandler; -import org.jboss.netty.channel.socket.DatagramChannelFactory; -import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory; +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import 
io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,36 +43,45 @@ public class SimpleUdpServer { private final int RECEIVE_BUFFER_SIZE = 65536; protected final int port; - protected final SimpleChannelUpstreamHandler rpcProgram; + protected final ChannelInboundHandlerAdapter rpcProgram; protected final int workerCount; protected int boundPort = -1; // Will be set after server starts - private ConnectionlessBootstrap server; + private Bootstrap server; private Channel ch; + private EventLoopGroup workerGroup; - public SimpleUdpServer(int port, SimpleChannelUpstreamHandler program, + public SimpleUdpServer(int port, ChannelInboundHandlerAdapter program, int workerCount) { this.port = port; this.rpcProgram = program; this.workerCount = workerCount; } - public void run() { - // Configure the client. - DatagramChannelFactory f = new NioDatagramChannelFactory( - Executors.newCachedThreadPool(), workerCount); + public void run() throws InterruptedException { + workerGroup = new NioEventLoopGroup(workerCount, Executors.newCachedThreadPool()); - server = new ConnectionlessBootstrap(f); - server.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER, - rpcProgram, RpcUtil.STAGE_RPC_UDP_RESPONSE)); - - server.setOption("broadcast", "false"); - server.setOption("sendBufferSize", SEND_BUFFER_SIZE); - server.setOption("receiveBufferSize", RECEIVE_BUFFER_SIZE); - server.setOption("reuseAddress", true); + server = new Bootstrap(); + server.group(workerGroup) + .channel(NioDatagramChannel.class) + .option(ChannelOption.SO_BROADCAST, true) + .option(ChannelOption.SO_SNDBUF, SEND_BUFFER_SIZE) + .option(ChannelOption.SO_RCVBUF, RECEIVE_BUFFER_SIZE) + .option(ChannelOption.SO_REUSEADDR, true) + .handler(new ChannelInitializer() { + @Override protected void initChannel(NioDatagramChannel ch) + throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast( + RpcUtil.STAGE_RPC_MESSAGE_PARSER, + rpcProgram, + RpcUtil.STAGE_RPC_UDP_RESPONSE); + } + }); // Listen to the UDP port - ch = server.bind(new InetSocketAddress(port)); - InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress(); + ChannelFuture f = server.bind(new InetSocketAddress(port)).sync(); + ch = f.channel(); + InetSocketAddress socketAddr = (InetSocketAddress) ch.localAddress(); boundPort = socketAddr.getPort(); LOG.info("Started listening to UDP requests at port " + boundPort + " for " @@ -83,9 +96,11 @@ public int getBoundPort() { public void shutdown() { if (ch != null) { ch.close().awaitUninterruptibly(); + ch = null; } - if (server != null) { - server.releaseExternalResources(); + if (workerGroup != null) { + workerGroup.shutdownGracefully(); + workerGroup = null; } } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java index 419eff831f0e7..6000fd57a1b65 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java @@ -20,8 +20,8 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -242,7 +242,7 @@ static byte[] recordMark(int size, boolean last) { * @param last specifies last request or not * @return TCP buffer */ - public static ChannelBuffer writeMessageTcp(XDR request, boolean last) { + public static ByteBuf writeMessageTcp(XDR request, boolean last) { Preconditions.checkState(request.state == XDR.State.WRITING); ByteBuffer b = request.buf.duplicate(); b.flip(); @@ -250,7 +250,7 @@ public static ChannelBuffer writeMessageTcp(XDR request, boolean last) { ByteBuffer headerBuf = ByteBuffer.wrap(fragmentHeader); // TODO: Investigate whether making a copy of the buffer is necessary. - return ChannelBuffers.copiedBuffer(headerBuf, b); + return Unpooled.wrappedBuffer(headerBuf, b); } /** @@ -258,10 +258,10 @@ public static ChannelBuffer writeMessageTcp(XDR request, boolean last) { * @param response XDR response * @return UDP buffer */ - public static ChannelBuffer writeMessageUdp(XDR response) { + public static ByteBuf writeMessageUdp(XDR response) { Preconditions.checkState(response.state == XDR.State.READING); // TODO: Investigate whether making a copy of the buffer is necessary. - return ChannelBuffers.copiedBuffer(response.buf); + return Unpooled.copiedBuffer(response.buf); } public static int fragmentSize(byte[] mark) { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java index 80f43828ea83b..1a8a305436ce9 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java @@ -22,21 +22,27 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.group.ChannelGroup; +import io.netty.channel.group.DefaultChannelGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.timeout.IdleStateHandler; +import io.netty.util.concurrent.GlobalEventExecutor; import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.util.StringUtils; -import org.jboss.netty.bootstrap.ConnectionlessBootstrap; -import org.jboss.netty.bootstrap.ServerBootstrap; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelPipeline; -import org.jboss.netty.channel.ChannelPipelineFactory; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.group.ChannelGroup; -import org.jboss.netty.channel.group.DefaultChannelGroup; -import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory; -import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; -import org.jboss.netty.handler.timeout.IdleStateHandler; -import org.jboss.netty.util.HashedWheelTimer; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import 
org.slf4j.Logger; @@ -49,11 +55,17 @@ final class Portmap { private static final Logger LOG = LoggerFactory.getLogger(Portmap.class); private static final int DEFAULT_IDLE_TIME_MILLISECONDS = 5000; - private ConnectionlessBootstrap udpServer; + private Bootstrap udpServer; private ServerBootstrap tcpServer; - private ChannelGroup allChannels = new DefaultChannelGroup(); + private ChannelGroup allChannels = new DefaultChannelGroup( + GlobalEventExecutor.INSTANCE); private Channel udpChannel; private Channel tcpChannel; + + EventLoopGroup bossGroup; + EventLoopGroup workerGroup; + EventLoopGroup udpGroup; + private final RpcProgramPortmap handler = new RpcProgramPortmap(allChannels); public static void main(String[] args) { @@ -73,18 +85,19 @@ public static void main(String[] args) { void shutdown() { allChannels.close().awaitUninterruptibly(); - tcpServer.releaseExternalResources(); - udpServer.releaseExternalResources(); + bossGroup.shutdownGracefully(); + workerGroup.shutdownGracefully(); + udpGroup.shutdownGracefully(); } @VisibleForTesting SocketAddress getTcpServerLocalAddress() { - return tcpChannel.getLocalAddress(); + return tcpChannel.localAddress(); } @VisibleForTesting SocketAddress getUdpServerLoAddress() { - return udpChannel.getLocalAddress(); + return udpChannel.localAddress(); } @VisibleForTesting @@ -93,38 +106,55 @@ RpcProgramPortmap getHandler() { } void start(final int idleTimeMilliSeconds, final SocketAddress tcpAddress, - final SocketAddress udpAddress) { - - tcpServer = new ServerBootstrap(new NioServerSocketChannelFactory( - Executors.newCachedThreadPool(), Executors.newCachedThreadPool())); - tcpServer.setPipelineFactory(new ChannelPipelineFactory() { - private final HashedWheelTimer timer = new HashedWheelTimer(); - private final IdleStateHandler idleStateHandler = new IdleStateHandler( - timer, 0, 0, idleTimeMilliSeconds, TimeUnit.MILLISECONDS); - - @Override - public ChannelPipeline getPipeline() throws Exception { - return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(), - RpcUtil.STAGE_RPC_MESSAGE_PARSER, idleStateHandler, handler, - RpcUtil.STAGE_RPC_TCP_RESPONSE); - } - }); - tcpServer.setOption("reuseAddress", true); - tcpServer.setOption("child.reuseAddress", true); - - udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory( - Executors.newCachedThreadPool())); - - udpServer.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER, - handler, RpcUtil.STAGE_RPC_UDP_RESPONSE)); - udpServer.setOption("reuseAddress", true); - - tcpChannel = tcpServer.bind(tcpAddress); - udpChannel = udpServer.bind(udpAddress); + final SocketAddress udpAddress) throws InterruptedException { + + bossGroup = new NioEventLoopGroup(); + workerGroup = new NioEventLoopGroup(0, Executors.newCachedThreadPool()); + + tcpServer = new ServerBootstrap(); + tcpServer.group(bossGroup, workerGroup) + .option(ChannelOption.SO_REUSEADDR, true) + .childOption(ChannelOption.SO_REUSEADDR, true) + .channel(NioServerSocketChannel.class) + .childHandler(new ChannelInitializer() { + private final IdleStateHandler idleStateHandler = new IdleStateHandler( + 0, 0, idleTimeMilliSeconds, TimeUnit.MILLISECONDS); + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + ChannelPipeline p = ch.pipeline(); + + p.addLast(RpcUtil.constructRpcFrameDecoder(), + RpcUtil.STAGE_RPC_MESSAGE_PARSER, idleStateHandler, handler, + RpcUtil.STAGE_RPC_TCP_RESPONSE); + }}); + + udpGroup = new NioEventLoopGroup(0, Executors.newCachedThreadPool()); + + udpServer = new 
Bootstrap(); + udpServer.group(udpGroup) + .channel(NioDatagramChannel.class) + .handler(new ChannelInitializer() { + @Override protected void initChannel(NioDatagramChannel ch) + throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast( + new LoggingHandler(LogLevel.DEBUG), + RpcUtil.STAGE_RPC_MESSAGE_PARSER, handler, RpcUtil.STAGE_RPC_UDP_RESPONSE); + } + }) + .option(ChannelOption.SO_REUSEADDR, true); + + ChannelFuture tcpChannelFuture = null; + tcpChannelFuture = tcpServer.bind(tcpAddress); + ChannelFuture udpChannelFuture = udpServer.bind(udpAddress); + tcpChannel = tcpChannelFuture.sync().channel(); + udpChannel = udpChannelFuture.sync().channel(); + allChannels.add(tcpChannel); allChannels.add(udpChannel); - LOG.info("Portmap server started at tcp://" + tcpChannel.getLocalAddress() - + ", udp://" + udpChannel.getLocalAddress()); + LOG.info("Portmap server started at tcp://" + tcpChannel.localAddress() + + ", udp://" + udpChannel.localAddress()); } } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java index 0bc380f614c1c..7b33a644fbe76 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java @@ -19,6 +19,14 @@ import java.util.concurrent.ConcurrentHashMap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.group.ChannelGroup; +import io.netty.handler.timeout.IdleState; +import io.netty.handler.timeout.IdleStateEvent; +import io.netty.handler.timeout.IdleStateHandler; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcInfo; @@ -27,20 +35,12 @@ import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.ChannelStateEvent; -import org.jboss.netty.channel.ExceptionEvent; -import org.jboss.netty.channel.MessageEvent; -import org.jboss.netty.channel.group.ChannelGroup; -import org.jboss.netty.handler.timeout.IdleState; -import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler; -import org.jboss.netty.handler.timeout.IdleStateEvent; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler { +@ChannelHandler.Sharable +final class RpcProgramPortmap extends IdleStateHandler { static final int PROGRAM = 100000; static final int VERSION = 2; @@ -60,6 +60,8 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler { private final ChannelGroup allChannels; RpcProgramPortmap(ChannelGroup allChannels) { + super(1, 1, 1); + // FIXME: set default idle timeout 1 second. 
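+    // IdleStateHandler(readerIdleTimeSeconds, writerIdleTimeSeconds, allIdleTimeSeconds);
+    // the 1-second all-idle timeout drives channelIdle() below, which closes idle connections.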
this.allChannels = allChannels; PortmapMapping m = new PortmapMapping(PROGRAM, VERSION, PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT); @@ -151,14 +153,14 @@ private XDR dump(int xid, XDR in, XDR out) { } @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - RpcInfo info = (RpcInfo) e.getMessage(); + RpcInfo info = (RpcInfo) msg; RpcCall rpcCall = (RpcCall) info.header(); final int portmapProc = rpcCall.getProcedure(); int xid = rpcCall.getXid(); - XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(), + XDR in = new XDR(info.data().nioBuffer().asReadOnlyBuffer(), XDR.State.READING); XDR out = new XDR(); @@ -181,29 +183,29 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) reply.write(out); } - ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap() + ByteBuf buf = Unpooled.wrappedBuffer(out.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); } @Override - public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e) + public void channelActive(ChannelHandlerContext ctx) throws Exception { - allChannels.add(e.getChannel()); + allChannels.add(ctx.channel()); } @Override public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent e) throws Exception { - if (e.getState() == IdleState.ALL_IDLE) { - e.getChannel().close(); + if (e.state() == IdleState.ALL_IDLE) { + ctx.channel().close(); } } @Override - public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { - LOG.warn("Encountered ", e.getCause()); - e.getChannel().close(); + public void exceptionCaught(ChannelHandlerContext ctx, Throwable t) { + LOG.warn("Encountered ", t); + ctx.channel().close(); } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java index 0e416b3738d20..6d103fdd781c6 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java @@ -22,19 +22,19 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; import java.util.Random; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelException; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.test.GenericTestUtils; -import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelException; -import org.jboss.netty.channel.ChannelHandlerContext; import org.junit.Test; import org.mockito.Mockito; import org.slf4j.event.Level; @@ -55,6 +55,7 @@ static void testRequest(XDR request, int serverPort) { tcpClient.run(); } + @ChannelHandler.Sharable static class TestRpcProgram extends RpcProgram { protected TestRpcProgram(String program, String host, int port, @@ -83,7 
+84,7 @@ protected void handleInternal(ChannelHandlerContext ctx, RpcInfo info) { new VerifierNone()); XDR out = new XDR(); reply.write(out); - ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer()); + ByteBuf b = Unpooled.wrappedBuffer(out.asReadOnlyWrap().buffer()); RpcResponse rsp = new RpcResponse(b, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); } @@ -99,13 +100,14 @@ public void testSingleFrame() { RpcFrameDecoder decoder = new RpcFrameDecoder(); // Test "Length field is not received yet" - ByteBuffer buffer = ByteBuffer.allocate(1); - ChannelBuffer buf = new ByteBufferBackedChannelBuffer(buffer); - ChannelBuffer channelBuffer = (ChannelBuffer) decoder.decode( - Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class), - buf); - assertTrue(channelBuffer == null); + ByteBuf buf = Unpooled.directBuffer(1); + List outputBufs = new ArrayList<>(); + decoder.decode( + Mockito.mock(ChannelHandlerContext.class), buf, + outputBufs); + assertTrue(outputBufs.isEmpty()); + decoder = new RpcFrameDecoder(); // Test all bytes are not received yet byte[] fragment = new byte[4 + 9]; fragment[0] = (byte) (1 << 7); // final fragment @@ -114,15 +116,16 @@ public void testSingleFrame() { fragment[3] = (byte) 10; // fragment size = 10 bytes assertTrue(XDR.isLastFragment(fragment)); assertTrue(XDR.fragmentSize(fragment)==10); + buf.release(); - buffer = ByteBuffer.allocate(4 + 9); - buffer.put(fragment); - buffer.flip(); - buf = new ByteBufferBackedChannelBuffer(buffer); - channelBuffer = (ChannelBuffer) decoder.decode( - Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class), - buf); - assertTrue(channelBuffer == null); + buf = Unpooled.directBuffer(4 + 9); + buf.writeBytes(fragment); + outputBufs = new ArrayList<>(); + decoder.decode( + Mockito.mock(ChannelHandlerContext.class), buf, + outputBufs); + assertTrue(decoder.isLast()); + buf.release(); } @Test @@ -137,16 +140,15 @@ public void testMultipleFrames() { fragment1[3] = (byte) 10; // fragment size = 10 bytes assertFalse(XDR.isLastFragment(fragment1)); assertTrue(XDR.fragmentSize(fragment1)==10); + + List outputBufs = new ArrayList<>(); // decoder should wait for the final fragment - ByteBuffer buffer = ByteBuffer.allocate(4 + 10); - buffer.put(fragment1); - buffer.flip(); - ChannelBuffer buf = new ByteBufferBackedChannelBuffer(buffer); - ChannelBuffer channelBuffer = (ChannelBuffer) decoder.decode( - Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class), - buf); - assertTrue(channelBuffer == null); + ByteBuf buf = Unpooled.directBuffer(4 + 10, 4 + 10); + buf.writeBytes(fragment1); + decoder.decode( + Mockito.mock(ChannelHandlerContext.class), buf, + outputBufs); byte[] fragment2 = new byte[4 + 10]; fragment2[0] = (byte) (1 << 7); // final fragment @@ -155,21 +157,22 @@ public void testMultipleFrames() { fragment2[3] = (byte) 10; // fragment size = 10 bytes assertTrue(XDR.isLastFragment(fragment2)); assertTrue(XDR.fragmentSize(fragment2)==10); + buf.release(); - buffer = ByteBuffer.allocate(4 + 10); - buffer.put(fragment2); - buffer.flip(); - buf = new ByteBufferBackedChannelBuffer(buffer); - channelBuffer = (ChannelBuffer) decoder.decode( - Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class), - buf); - assertTrue(channelBuffer != null); - // Complete frame should have to total size 10+10=20 - assertEquals(20, channelBuffer.readableBytes()); + buf = Unpooled.directBuffer(4 + 10, 4 + 10); + buf.writeBytes(fragment2); + decoder.decode( + 
Mockito.mock(ChannelHandlerContext.class), buf, + outputBufs); + // Expect two completed frames each 10 bytes + decoder.isLast(); + assertEquals(2, outputBufs.size()); + outputBufs.forEach(b -> assertEquals(((ByteBuf)b).readableBytes(), 10)); + buf.release(); } @Test - public void testFrames() { + public void testFrames() throws InterruptedException { int serverPort = startRpcServer(true); XDR xdrOut = createGetportMount(); @@ -187,7 +190,7 @@ public void testFrames() { } @Test - public void testUnprivilegedPort() { + public void testUnprivilegedPort() throws InterruptedException { // Don't allow connections from unprivileged ports. Given that this test is // presumably not being run by root, this will be the case. int serverPort = startRpcServer(false); @@ -218,23 +221,28 @@ public void testUnprivilegedPort() { assertEquals(requestSize, resultSize); } - private static int startRpcServer(boolean allowInsecurePorts) { + private static int startRpcServer(boolean allowInsecurePorts) + throws InterruptedException { Random rand = new Random(); int serverPort = 30000 + rand.nextInt(10000); int retries = 10; // A few retries in case initial choice is in use. while (true) { + SimpleTcpServer tcpServer = null; try { RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram", "localhost", serverPort, 100000, 1, 2, allowInsecurePorts); - SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1); + tcpServer = new SimpleTcpServer(serverPort, program, 1); tcpServer.run(); break; // Successfully bound a port, break out. - } catch (ChannelException ce) { + } catch (InterruptedException | ChannelException e) { + if (tcpServer != null) { + tcpServer.shutdown(); + } if (retries-- > 0) { serverPort += rand.nextInt(20); // Port in use? Try another. } else { - throw ce; // Out of retries. + throw e; // Out of retries. 
} } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java index 6941c4a04e998..8ebf9d03c6c30 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java @@ -43,7 +43,7 @@ public class TestPortmap { private int xid; @BeforeClass - public static void setup() { + public static void setup() throws InterruptedException { pm.start(SHORT_TIMEOUT_MILLISECONDS, new InetSocketAddress("localhost", 0), new InetSocketAddress("localhost", 0)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 18a91cbcdc6f8..ab21583964b5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -47,7 +47,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty - netty + netty-all compile diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index 3b0327ad4a149..2ba1bb060ce43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -26,6 +26,10 @@ import java.util.List; import java.util.HashMap; +import io.netty.channel.ChannelHandler; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileSystem; @@ -51,15 +55,13 @@ import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.ChannelHandlerContext; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * RPC program corresponding to mountd daemon. See {@link Mountd}. 
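+ * Netty 4 permits a single handler instance to be added to more than one channel pipeline
+ * only when it is annotated {@code @ChannelHandler.Sharable}, hence the annotation below.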
*/ +@ChannelHandler.Sharable public class RpcProgramMountd extends RpcProgram implements MountInterface { private static final Logger LOG = LoggerFactory.getLogger(RpcProgramMountd.class); @@ -262,8 +264,8 @@ RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()) RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( out); } - ChannelBuffer buf = - ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer()); + ByteBuf buf = + Unpooled.wrappedBuffer(out.asReadOnlyWrap().buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java index c6da1981f3716..c58dc5976b37d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java @@ -22,6 +22,8 @@ import java.net.URI; import java.nio.file.FileSystemException; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; @@ -39,8 +41,6 @@ import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.security.IdMappingServiceProvider; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.Channel; /** * Utility/helper methods related to NFS @@ -147,16 +147,16 @@ public static void writeChannel(Channel channel, XDR out, int xid) { if (RpcProgramNfs3.LOG.isDebugEnabled()) { RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid); } - ChannelBuffer outBuf = XDR.writeMessageTcp(out, true); - channel.write(outBuf); + ByteBuf outBuf = XDR.writeMessageTcp(out, true); + channel.writeAndFlush(outBuf); } public static void writeChannelCommit(Channel channel, XDR out, int xid) { if (RpcProgramNfs3.LOG.isDebugEnabled()) { RpcProgramNfs3.LOG.debug("Commit done:" + xid); } - ChannelBuffer outBuf = XDR.writeMessageTcp(out, true); - channel.write(outBuf); + ByteBuf outBuf = XDR.writeMessageTcp(out, true); + channel.writeAndFlush(outBuf); } private static boolean isSet(int access, int bits) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index 528ead7a003b3..8358c056cac4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; +import io.netty.channel.Channel; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -55,7 +56,6 @@ import org.apache.hadoop.security.IdMappingServiceProvider; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; -import org.jboss.netty.channel.Channel; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index d436eac598be0..f6cb4350e4050 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -28,6 +28,11 @@ import java.nio.charset.Charset; import java.util.EnumSet; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException; @@ -129,10 +134,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.util.JvmPauseMonitor; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; @@ -141,6 +142,7 @@ /** * RPC program corresponding to nfs daemon. See {@link Nfs3}. */ +@ChannelHandler.Sharable public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { public static final int DEFAULT_UMASK = 0022; public static final FsPermission umask = new FsPermission( @@ -2180,7 +2182,7 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) { RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone()); rdr.write(reply); - ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap() + ByteBuf buf = Unpooled.wrappedBuffer(reply.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); @@ -2291,7 +2293,7 @@ RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( } // TODO: currently we just return VerifierNone out = response.serialize(out, xid, new VerifierNone()); - ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap() + ByteBuf buf = Unpooled.wrappedBuffer(out.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java index 76859247bf2a3..d5c9d4f55924a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java @@ -22,12 +22,12 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; +import io.netty.channel.Channel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; -import org.jboss.netty.channel.Channel; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java index 288937104084b..a1b6e12eebfcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.EnumSet; +import io.netty.channel.Channel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -43,7 +44,6 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.IdMappingServiceProvider; -import org.jboss.netty.channel.Channel; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java index 4e53c72bec8a8..31528a2db87a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java @@ -21,6 +21,12 @@ import java.nio.ByteBuffer; import java.util.Arrays; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.socket.SocketChannel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; @@ -42,13 +48,6 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.ChannelHandlerContext; -import org.jboss.netty.channel.ChannelPipeline; -import org.jboss.netty.channel.ChannelPipelineFactory; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.MessageEvent; public class TestOutOfOrderWrite { public final static Logger LOG = @@ -100,9 +99,9 @@ public WriteHandler(XDR request) { } @Override - public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { + public void channelRead(ChannelHandlerContext ctx, Object msg) { // Get handle from create response - ChannelBuffer buf = (ChannelBuffer) e.getMessage(); + ByteBuf buf = (ByteBuf) msg; XDR rsp = new XDR(buf.array()); if (rsp.getBytes().length == 0) { LOG.info("rsp length is zero, why?"); @@ -125,7 +124,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { rsp.readBoolean(); // value follow handle = new FileHandle(); handle.deserialize(rsp); - channel = e.getChannel(); + channel = ctx.channel(); } } @@ -136,16 +135,17 @@ public WriteClient(String host, int port, XDR request, Boolean oneShot) { } @Override - protected ChannelPipelineFactory setPipelineFactory() { - this.pipelineFactory = new ChannelPipelineFactory() { + protected ChannelInitializer setChannelHandler() { + return new ChannelInitializer() { @Override - public ChannelPipeline getPipeline() { - return Channels.pipeline( + protected void 
initChannel(SocketChannel ch) throws Exception { + ChannelPipeline p = ch.pipeline(); + p.addLast( RpcUtil.constructRpcFrameDecoder(), - new WriteHandler(request)); + new WriteHandler(request) + ); } }; - return this.pipelineFactory; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java index 30ecc0b824b9e..07954c00d64e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java @@ -28,6 +28,7 @@ import java.nio.ByteBuffer; import java.util.EnumSet; +import io.netty.channel.Channel; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -92,7 +93,6 @@ import org.apache.hadoop.security.IdMappingConstant; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; -import org.jboss.netty.channel.Channel; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java index f7a92fac53501..0f03c6da93bf3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java @@ -27,6 +27,7 @@ import java.util.Arrays; import java.util.concurrent.ConcurrentNavigableMap; +import io.netty.channel.Channel; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -52,7 +53,6 @@ import org.apache.hadoop.security.ShellBasedIdMapping; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; -import org.jboss.netty.channel.Channel; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; From e59eb7512acda9b850e565ae6be97dec82c66e6c Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 28 Apr 2021 23:54:39 -0700 Subject: [PATCH 283/666] HDFS-15624. fix the function of setting quota by storage type (#2377) (#2955) 1. puts NVDIMM to the end of storage type enum to make sure compatibility. 2. 
adds check to make sure the software layout version is satisfied Co-authored-by: su xu Co-authored-by: huangtianhua Co-authored-by: YaYun-Wang <34060507+YaYun-Wang@users.noreply.github.com> Signed-off-by: Mingliang Liu Signed-off-by: Ayush Saxena Signed-off-by: Vinayakumar B Change-Id: I3c58beef50730827a09b3c968e9ad637baa57d44 --- .../java/org/apache/hadoop/fs/StorageType.java | 7 +++---- .../java/org/apache/hadoop/fs/shell/TestCount.java | 4 ++-- .../server/federation/router/TestRouterQuota.java | 14 +++++++------- .../hadoop/hdfs/server/namenode/FSNamesystem.java | 7 +++++++ .../server/namenode/NameNodeLayoutVersion.java | 3 ++- .../apache/hadoop/hdfs/TestBlockStoragePolicy.java | 6 +++--- .../hadoop/hdfs/protocol/TestLayoutVersion.java | 3 ++- 7 files changed, 26 insertions(+), 18 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java index b17864a22282a..2b5e4f1c5cee2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java @@ -34,13 +34,12 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public enum StorageType { - // sorted by the speed of the storage types, from fast to slow RAM_DISK(true, true), - NVDIMM(false, true), SSD(false, false), DISK(false, false), ARCHIVE(false, false), - PROVIDED(false, false); + PROVIDED(false, false), + NVDIMM(false, true); private final boolean isTransient; private final boolean isRAM; @@ -122,4 +121,4 @@ public static String getConf(Configuration conf, StorageType t, String name) { return conf.get(CONF_KEY_HEADER + t.toString() + "." 
+ name); } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java index 618cb0190a99a..19516b8c40799 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java @@ -283,10 +283,10 @@ public void processPathWithQuotasByStorageTypesHeader() throws Exception { count.processOptions(options); String withStorageTypeHeader = // <----13---> <-------17------> <----13-----> <------17-------> - " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + " SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " + // <----13---> <-------17------> "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " + + " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); @@ -338,11 +338,11 @@ public void processPathWithQuotasByQTVH() throws Exception { count.processOptions(options); String withStorageTypeHeader = // <----13---> <-------17------> - " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + " SSD_QUOTA REM_SSD_QUOTA " + " DISK_QUOTA REM_DISK_QUOTA " + "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " + "PROVIDED_QUOTA REM_PROVIDED_QUOTA " + + " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index 551ae8a8e0612..b69004198eb48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -414,13 +414,13 @@ public void testStorageTypeQuota() throws Exception { QuotaUsage usage = client.getQuotaUsage("/type0"); assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1}, null, - usage); + verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1}, + null, usage); // Verify /type1 quota on NN1. usage = client.getQuotaUsage("/type1"); assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1}, null, + verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1}, null, usage); FileSystem routerFs = routerContext.getFileSystem(); @@ -431,15 +431,15 @@ public void testStorageTypeQuota() throws Exception { assertEquals(2, u1.getFileAndDirectoryCount()); assertEquals(HdfsConstants.QUOTA_RESET, u1.getSpaceQuota()); assertEquals(fileSize * 3, u1.getSpaceConsumed()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1}, - new long[] {0, 0, 0, fileSize * 3, 0, 0}, u1); + verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1}, + new long[] {0, 0, fileSize * 3, 0, 0, 0}, u1); // Verify /type0 storage type quota usage on Router. 
assertEquals(HdfsConstants.QUOTA_RESET, u0.getQuota()); assertEquals(4, u0.getFileAndDirectoryCount()); assertEquals(HdfsConstants.QUOTA_RESET, u0.getSpaceQuota()); assertEquals(fileSize * 3 * 2, u0.getSpaceConsumed()); - verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1}, - new long[] {0, 0, 0, fileSize * 3 * 2, 0, 0}, u0); + verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1}, + new long[] {0, 0, fileSize * 3 * 2, 0, 0, 0}, u0); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7b9efea538e18..095959b45bf67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2434,6 +2434,10 @@ private void checkStoragePolicyEnabled(final String operationNameReadable, * @throws IOException */ void setStoragePolicy(String src, String policyName) throws IOException { + if (policyName.equalsIgnoreCase( + HdfsConstants.ALLNVDIMM_STORAGE_POLICY_NAME)) { + requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT); + } final String operationName = "setStoragePolicy"; checkOperation(OperationCategory.WRITE); checkStoragePolicyEnabled("set storage policy", true); @@ -3576,6 +3580,9 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) if (type != null) { requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE); } + if (type == StorageType.NVDIMM) { + requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT); + } checkOperation(OperationCategory.WRITE); final String operationName = getQuotaCommand(nsQuota, ssQuota); final FSPermissionChecker pc = getPermissionChecker(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java index bcb3714268aa2..f5e261dc78aae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java @@ -90,7 +90,8 @@ public enum Feature implements LayoutFeature { QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types"), ERASURE_CODING(-64, -61, "Support erasure coding"), EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"), - SNAPSHOT_MODIFICATION_TIME(-66, -61, "Support modification time for snapshot"); + SNAPSHOT_MODIFICATION_TIME(-66, -61, "Support modification time for snapshot"), + NVDIMM_SUPPORT(-67, -61, "Support NVDIMM storage type"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index efb4b4df358f4..ef116957fb84d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -1421,29 +1421,29 @@ public void testStorageType() { final EnumMap map = new EnumMap<>(StorageType.class); //put storage type is 
reversed order + map.put(StorageType.NVDIMM, 1); map.put(StorageType.ARCHIVE, 1); map.put(StorageType.DISK, 1); map.put(StorageType.SSD, 1); map.put(StorageType.RAM_DISK, 1); - map.put(StorageType.NVDIMM, 1); { final Iterator i = map.keySet().iterator(); Assert.assertEquals(StorageType.RAM_DISK, i.next()); - Assert.assertEquals(StorageType.NVDIMM, i.next()); Assert.assertEquals(StorageType.SSD, i.next()); Assert.assertEquals(StorageType.DISK, i.next()); Assert.assertEquals(StorageType.ARCHIVE, i.next()); + Assert.assertEquals(StorageType.NVDIMM, i.next()); } { final Iterator> i = map.entrySet().iterator(); Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey()); - Assert.assertEquals(StorageType.NVDIMM, i.next().getKey()); Assert.assertEquals(StorageType.SSD, i.next().getKey()); Assert.assertEquals(StorageType.DISK, i.next().getKey()); Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey()); + Assert.assertEquals(StorageType.NVDIMM, i.next().getKey()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java index 96629425f8d55..f074897001469 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java @@ -129,7 +129,8 @@ public void testNameNodeFeatureMinimumCompatibleLayoutVersions() { NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE, NameNodeLayoutVersion.Feature.ERASURE_CODING, NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE, - NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME); + NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME, + NameNodeLayoutVersion.Feature.NVDIMM_SUPPORT); for (LayoutFeature f : compatibleFeatures) { assertEquals(String.format("Expected minimum compatible layout version " + "%d for feature %s.", baseLV, f), baseLV, From 41483c351b5bf6e4ce635601488f756956d31e68 Mon Sep 17 00:00:00 2001 From: lfengnan Date: Thu, 29 Apr 2021 06:14:18 -0700 Subject: [PATCH 284/666] HDFS-15561. RBF: Remove NPE when local namenode is not configured (#2954). Contributed by Fengnan Li. Reviewed-by: He Xiaoqiao --- .../hdfs/server/federation/router/Router.java | 17 ++++++++- .../router/TestRouterNamenodeHeartbeat.java | 38 +++++++++++++++++++ .../router/TestRouterWebHdfsMethods.java | 1 + 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index d6e5a1cfe9626..ae9b62b7b4105 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -550,14 +550,20 @@ public void verifyToken(DelegationTokenIdentifier tokenId, byte[] password) * * @return Updater of the status for the local Namenode. 
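+   * Returns {@code null} if no local nameservice id (or, with HA enabled, no local
+   * namenode id) can be resolved from the configuration.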
*/ - protected NamenodeHeartbeatService createLocalNamenodeHeartbeatService() { + @VisibleForTesting + public NamenodeHeartbeatService createLocalNamenodeHeartbeatService() { // Detect NN running in this machine String nsId = DFSUtil.getNamenodeNameServiceId(conf); + if (nsId == null) { + LOG.error("Cannot find local nameservice id"); + return null; + } String nnId = null; if (HAUtil.isHAEnabled(conf, nsId)) { nnId = HAUtil.getNameNodeId(conf, nsId); if (nnId == null) { LOG.error("Cannot find namenode id for local {}", nsId); + return null; } } @@ -787,4 +793,13 @@ public RouterAdminServer getAdminServer() { return adminServer; } + /** + * Set router configuration. + * @param conf + */ + @VisibleForTesting + public void setConf(Configuration conf) { + this.conf = conf; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java index d2bc5d6eb2fcb..38419ed84926f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java @@ -17,15 +17,21 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.federation.MockResolver; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; @@ -105,6 +111,38 @@ public void testNamenodeHeartbeatService() throws IOException { server.close(); } + @Test + public void testLocalNamenodeHeartbeatService() throws IOException { + Router router = new Router(); + Configuration conf = new Configuration(); + assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf)); + + // case 1: no local nn is configured + router.setConf(conf); + assertNull(router.createLocalNamenodeHeartbeatService()); + + // case 2: local nn is configured + conf.set(DFS_NAMESERVICES, "ns1"); + assertEquals("ns1", DFSUtil.getNamenodeNameServiceId(conf)); + conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), + "nn1,nn2"); + conf.set(DFSUtil.addKeySuffixes( + DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), + "localhost:8020"); + conf.set(DFSUtil.addKeySuffixes( + DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), + "ns1-nn2.example.com:8020"); + router.setConf(conf); + NamenodeHeartbeatService heartbeatService = + router.createLocalNamenodeHeartbeatService(); + assertNotNull(heartbeatService); + // we have to start the service to get the serviceAddress assigned + heartbeatService.init(conf); + 
assertEquals("ns1-nn1:localhost:8020", + heartbeatService.getNamenodeDesc()); + heartbeatService.stop(); + } + @Test public void testHearbeat() throws InterruptedException, IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java index 7028928041452..8e82d44c4ddfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java @@ -61,6 +61,7 @@ public static void globalSetUp() throws Exception { .admin() .build(); cluster.addRouterOverrides(conf); + cluster.setIndependentDNs(); cluster.startCluster(); cluster.startRouters(); cluster.waitClusterUp(); From a39c5be3937ff20aebf99af10c4f15bd4bb837a9 Mon Sep 17 00:00:00 2001 From: Eric Badger Date: Thu, 29 Apr 2021 17:42:13 +0000 Subject: [PATCH 285/666] YARN-10707. Support custom resources in ResourceUtilization, and update Node GPU Utilization to use. Contributed by Qi Zhu --- .../yarn/api/records/ResourceUtilization.java | 128 +++++++++++++++++- .../src/main/proto/yarn_protos.proto | 6 + .../yarn/api/records/impl/pb/ProtoUtils.java | 29 ++++ .../impl/pb/ResourceUtilizationPBImpl.java | 31 ++++- .../yarn/api/BasePBImplRecordsTest.java | 2 + .../api/records/TestResourceUtilization.java | 49 +++++++ .../nodemanager/NodeResourceMonitorImpl.java | 30 ++-- .../gpu/GpuNodeResourceUpdateHandler.java | 43 +++++- .../gpu/TestGpuResourcePlugin.java | 4 +- 9 files changed, 297 insertions(+), 25 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java index f6c5a69eade00..ff3cec32b6522 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java @@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; +import java.util.HashMap; +import java.util.Map; + /** *

    * ResourceUtilization models the utilization of a set of computer @@ -33,14 +36,26 @@ public abstract class ResourceUtilization implements Comparable { + private Map customResources + = new HashMap<>(); + @Public @Unstable - public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) { + public static ResourceUtilization newInstance(int pmem, int vmem, + float cpu) { + return newInstance(pmem, vmem, cpu, null); + } + + @Public + @Unstable + public static ResourceUtilization newInstance(int pmem, int vmem, + float cpu, Map customResources) { ResourceUtilization utilization = Records.newRecord(ResourceUtilization.class); utilization.setPhysicalMemory(pmem); utilization.setVirtualMemory(vmem); utilization.setCPU(cpu); + utilization.setCustomResources(customResources); return utilization; } @@ -49,7 +64,9 @@ public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) { public static ResourceUtilization newInstance( ResourceUtilization resourceUtil) { return newInstance(resourceUtil.getPhysicalMemory(), - resourceUtil.getVirtualMemory(), resourceUtil.getCPU()); + resourceUtil.getVirtualMemory(), + resourceUtil.getCPU(), + resourceUtil.getCustomResources()); } /** @@ -106,6 +123,51 @@ public static ResourceUtilization newInstance( @Unstable public abstract void setCPU(float cpu); + /** + * Get custom resource utilization + * (The amount of custom resource used). + * + * @param resourceName resourceName of custom resource + * @return resourceName utilization + */ + @Public + @Unstable + public float getCustomResource(String resourceName) { + if (customResources != null && resourceName != null) { + return customResources.get(resourceName); + } + return 0f; + } + + @Public + @Unstable + public Map getCustomResources() { + return customResources; + } + + @Public + @Unstable + public void setCustomResources(Map customResources) { + if (customResources != null) { + this.customResources = customResources; + } + } + + /** + * Set custom resource utilization + * (The amount of custom resource used). 
+ * @param resourceName resourceName + * @param utilization utilization of custom resource + * + */ + @Public + @Unstable + public void setCustomResource(String resourceName, float utilization) { + if (resourceName != null && !resourceName.isEmpty()) { + customResources.put(resourceName, utilization); + } + } + @Override public int hashCode() { final int prime = 263167; @@ -113,6 +175,12 @@ public int hashCode() { result = prime * result + getVirtualMemory(); result = prime * result + getPhysicalMemory(); result = 31 * result + Float.valueOf(getCPU()).hashCode(); + if (customResources != null && !customResources.isEmpty()) { + for (Map.Entry entry : customResources.entrySet()) { + result = 31 * result + + customResources.get(entry.getKey()).hashCode(); + } + } return result; } @@ -130,7 +198,8 @@ public boolean equals(Object obj) { ResourceUtilization other = (ResourceUtilization) obj; if (getVirtualMemory() != other.getVirtualMemory() || getPhysicalMemory() != other.getPhysicalMemory() - || getCPU() != other.getCPU()) { + || getCPU() != other.getCPU() + || !customResources.equals(other.customResources)) { return false; } return true; @@ -138,8 +207,19 @@ public boolean equals(Object obj) { @Override public String toString() { - return ""; + StringBuilder utilizationString = new StringBuilder(); + utilizationString.append( + " entry : getCustomResources().entrySet()) { + utilizationString.append(", " + + entry.getKey() + ":" + entry.getValue()); + } + } + + utilizationString.append(">"); + return utilizationString.toString(); } /** @@ -151,9 +231,28 @@ public String toString() { @Public @Unstable public void addTo(int pmem, int vmem, float cpu) { + addTo(pmem, vmem, cpu, null, 0f); + } + + /** + * Add utilization to the current one. + * @param pmem Physical memory used to add. + * @param vmem Virtual memory used to add. + * @param cpu CPU utilization to add. + * @param resourceName of custom resource to add. + * @param utilization of custom resource to add. + */ + @Public + @Unstable + public void addTo(int pmem, int vmem, float cpu, + String resourceName, float utilization) { this.setPhysicalMemory(this.getPhysicalMemory() + pmem); this.setVirtualMemory(this.getVirtualMemory() + vmem); this.setCPU(this.getCPU() + cpu); + if (resourceName != null) { + this.setCustomResource(resourceName, + getCustomResource(resourceName) + utilization); + } } /** @@ -165,8 +264,27 @@ public void addTo(int pmem, int vmem, float cpu) { @Public @Unstable public void subtractFrom(int pmem, int vmem, float cpu) { + subtractFrom(pmem, vmem, cpu, null, 0f); + } + + /** + * Subtract utilization from the current one. + * @param pmem Physical memory to be subtracted. + * @param vmem Virtual memory to be subtracted. + * @param cpu CPU utilization to be subtracted. + * @param resourceName of custom resource to be subtracted. + * @param utilization of custom resource to be subtracted. 
+ */ + @Public + @Unstable + public void subtractFrom(int pmem, int vmem, float cpu, + String resourceName, float utilization) { this.setPhysicalMemory(this.getPhysicalMemory() - pmem); this.setVirtualMemory(this.getVirtualMemory() - vmem); this.setCPU(this.getCPU() - cpu); + if (resourceName != null) { + this.setCustomResource(resourceName, + getCustomResource(resourceName) - utilization); + } } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 0b360dfca5941..558e724f8ac6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -83,6 +83,7 @@ message ResourceUtilizationProto { optional int32 pmem = 1; optional int32 vmem = 2; optional float cpu = 3; + repeated StringFloatMapProto customResources = 4; } message ResourceOptionProto { @@ -243,6 +244,11 @@ message StringLongMapProto { required int64 value = 2; } +message StringFloatMapProto { + required string key = 1; + required float value = 2; +} + message ApplicationResourceUsageReportProto { optional int32 num_used_containers = 1; optional int32 num_reserved_containers = 2; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java index cdeb417243e37..64bf8cf5d3876 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java @@ -587,6 +587,22 @@ public static List convertMapToStringLongMapProto return ret; } + public static List + convertMapToStringFloatMapProtoList( + Map map) { + List ret = new ArrayList<>(); + if (map != null) { + for (Map.Entry entry : map.entrySet()) { + YarnProtos.StringFloatMapProto.Builder tmp = + YarnProtos.StringFloatMapProto.newBuilder(); + tmp.setKey(entry.getKey()); + tmp.setValue(entry.getValue()); + ret.add(tmp.build()); + } + } + return ret; + } + public static Map convertStringStringMapProtoListToMap( List pList) { Map ret = new HashMap<>(); @@ -600,6 +616,19 @@ public static Map convertStringStringMapProtoListToMap( return ret; } + public static Map convertStringFloatMapProtoListToMap( + List pList) { + Map ret = new HashMap<>(); + if (pList != null) { + for (YarnProtos.StringFloatMapProto p : pList) { + if (p.hasKey()) { + ret.put(p.getKey(), p.getValue()); + } + } + } + return ret; + } + public static List convertToProtoFormat( Map stringMap) { List pList = new ArrayList<>(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceUtilizationPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceUtilizationPBImpl.java index e37adbe63f97e..023d1e9ff68c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceUtilizationPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceUtilizationPBImpl.java @@ -24,6 +24,8 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceUtilizationProtoOrBuilder; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import java.util.Map; + @Private @Unstable public class ResourceUtilizationPBImpl extends ResourceUtilization { @@ -69,7 +71,7 @@ public void setPhysicalMemory(int pmem) { @Override public int getVirtualMemory() { ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder; - return (p.getVmem()); + return p.getVmem(); } @Override @@ -90,6 +92,28 @@ public void setCPU(float cpu) { builder.setCpu(cpu); } + @Override + public float getCustomResource(String resourceName) { + return getCustomResources().get(resourceName); + } + + @Override + public Map getCustomResources() { + ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder; + return ProtoUtils. + convertStringFloatMapProtoListToMap(p. + getCustomResourcesList()); + } + + @Override + public void setCustomResources(Map customResources) { + if (customResources != null) { + maybeInitBuilder(); + builder.addAllCustomResources(ProtoUtils. + convertMapToStringFloatMapProtoList(customResources)); + } + } + @Override public int compareTo(ResourceUtilization other) { int diff = this.getPhysicalMemory() - other.getPhysicalMemory(); @@ -97,6 +121,11 @@ public int compareTo(ResourceUtilization other) { diff = this.getVirtualMemory() - other.getVirtualMemory(); if (diff == 0) { diff = Float.compare(this.getCPU(), other.getCPU()); + if (diff == 0) { + diff = this.getCustomResources().size() - + other.getCustomResources().size(); + // todo how to compare custom resource in same size + } } } return diff; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index affa08f082437..22b687c159e15 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -76,6 +76,8 @@ private static Object genTypeValue(Type type) { 'a' + rand.nextInt(26), 'a' + rand.nextInt(26), 'a' + rand.nextInt(26)); + } else if (type.equals(Float.class)) { + return rand.nextFloat(); } else if (type instanceof Class) { Class clazz = (Class)type; if (clazz.isArray()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java index 5934846e2f36f..a2b05708326b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java @@ -21,6 +21,9 @@ import org.junit.Assert; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + public class TestResourceUtilization { @Test @@ -60,4 +63,50 @@ public void testResourceUtilization() { u1.subtractFrom(10, 0, 0.0f); Assert.assertEquals(u1, u3); } + + @Test + public void testResourceUtilizationWithCustomResource() { + Map customResources = new HashMap<>(); + customResources.put(ResourceInformation.GPU_URI, 5.0f); + ResourceUtilization u1 = ResourceUtilization. 
+ newInstance(10, 20, 0.5f, customResources); + ResourceUtilization u2 = ResourceUtilization.newInstance(u1); + ResourceUtilization u3 = ResourceUtilization. + newInstance(10, 20, 0.5f, customResources); + ResourceUtilization u4 = ResourceUtilization. + newInstance(20, 20, 0.5f, customResources); + ResourceUtilization u5 = ResourceUtilization. + newInstance(30, 40, 0.8f, customResources); + + Assert.assertEquals(u1, u2); + Assert.assertEquals(u1, u3); + Assert.assertNotEquals(u1, u4); + Assert.assertNotEquals(u2, u5); + Assert.assertNotEquals(u4, u5); + + Assert.assertTrue(u1.hashCode() == u2.hashCode()); + Assert.assertTrue(u1.hashCode() == u3.hashCode()); + Assert.assertFalse(u1.hashCode() == u4.hashCode()); + Assert.assertFalse(u2.hashCode() == u5.hashCode()); + Assert.assertFalse(u4.hashCode() == u5.hashCode()); + + Assert.assertTrue(u1.getPhysicalMemory() == 10); + Assert.assertFalse(u1.getVirtualMemory() == 10); + Assert.assertTrue(u1.getCPU() == 0.5f); + Assert.assertTrue(u1. + getCustomResource(ResourceInformation.GPU_URI) == 5.0f); + + Assert.assertEquals("", u1.toString()); + + u1.addTo(10, 0, 0.0f); + Assert.assertNotEquals(u1, u2); + Assert.assertEquals(u1, u4); + u1.addTo(10, 20, 0.3f); + Assert.assertEquals(u1, u5); + u1.subtractFrom(10, 20, 0.3f); + Assert.assertEquals(u1, u4); + u1.subtractFrom(10, 0, 0.0f); + Assert.assertEquals(u1, u3); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index 7577b55899781..37fa33e14fcce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -30,6 +30,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.HashMap; +import java.util.Map; + /** * Implementation of the node resource monitor. It periodically tracks the * resource utilization of the node and reports it to the NM. @@ -54,8 +57,11 @@ public class NodeResourceMonitorImpl extends AbstractService implements private GpuNodeResourceUpdateHandler gpuNodeResourceUpdateHandler; /** Current resource utilization of the node. 
*/ + + private Map customResources = new HashMap<>(); + private ResourceUtilization nodeUtilization = - ResourceUtilization.newInstance(0, 0, 0f); + ResourceUtilization.newInstance(0, 0, 0f, customResources); private Context nmContext; /** @@ -165,22 +171,26 @@ public void run() { resourceCalculatorPlugin.getVirtualMemorySize() - resourceCalculatorPlugin.getAvailableVirtualMemorySize(); float vcores = resourceCalculatorPlugin.getNumVCoresUsed(); - nodeUtilization = - ResourceUtilization.newInstance( - (int) (pmem >> 20), // B -> MB - (int) (vmem >> 20), // B -> MB - vcores); // Used Virtual Cores - float nodeGpuUtilization = 0F; + float totalNodeGpuUtilization = 0F; try { if (gpuNodeResourceUpdateHandler != null) { - nodeGpuUtilization = - gpuNodeResourceUpdateHandler.getNodeGpuUtilization(); + totalNodeGpuUtilization = + gpuNodeResourceUpdateHandler.getTotalNodeGpuUtilization(); } } catch (Exception e) { LOG.error("Get Node GPU Utilization error: " + e); } + customResources. + put(ResourceInformation.GPU_URI, totalNodeGpuUtilization); + nodeUtilization = + ResourceUtilization.newInstance( + (int) (pmem >> 20), // B -> MB + (int) (vmem >> 20), // B -> MB + vcores, // Used Virtual Cores + customResources); // Used GPUs + // Publish the node utilization metrics to node manager // metrics system. NodeManagerMetrics nmMetrics = nmContext.getNodeManagerMetrics(); @@ -188,7 +198,7 @@ public void run() { nmMetrics.setNodeUsedMemGB(nodeUtilization.getPhysicalMemory()); nmMetrics.setNodeUsedVMemGB(nodeUtilization.getVirtualMemory()); nmMetrics.setNodeCpuUtilization(nodeUtilization.getCPU()); - nmMetrics.setNodeGpuUtilization(nodeGpuUtilization); + nmMetrics.setNodeGpuUtilization(totalNodeGpuUtilization); } try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java index af81709566d18..c31555ed962a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java @@ -79,19 +79,48 @@ public void updateConfiguredResource(Resource res) throws YarnException { res.setResourceValue(GPU_URI, nUsableGpus); } - public float getNodeGpuUtilization() throws Exception{ + /** + * + * @return The average physical GPUs used in this node. 
+ * + * For example: + * Node with total 4 GPUs + * Physical used 2.4 GPUs + * Will return 2.4/4 = 0.6f + * + * @throws Exception when any error happens + */ + public float getAvgNodeGpuUtilization() throws Exception{ List gpuList = gpuDiscoverer.getGpuDeviceInformation().getGpus(); - Float totalGpuUtilization = 0F; + Float avgGpuUtilization = 0F; if (gpuList != null && gpuList.size() != 0) { - totalGpuUtilization = gpuList - .stream() - .map(g -> g.getGpuUtilizations().getOverallGpuUtilization()) - .collect(Collectors.summingDouble(Float::floatValue)) - .floatValue() / gpuList.size(); + avgGpuUtilization = getTotalNodeGpuUtilization() / gpuList.size(); } + return avgGpuUtilization; + } + + /** + * + * @return The total physical GPUs used in this node. + * + * For example: + * Node with total 4 GPUs + * Physical used 2.4 GPUs + * Will return 2.4f + * + * @throws Exception when any error happens + */ + public float getTotalNodeGpuUtilization() throws Exception{ + List gpuList = + gpuDiscoverer.getGpuDeviceInformation().getGpus(); + Float totalGpuUtilization = gpuList + .stream() + .map(g -> g.getGpuUtilizations().getOverallGpuUtilization()) + .collect(Collectors.summingDouble(Float::floatValue)) + .floatValue(); return totalGpuUtilization; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java index 749e0cc14de91..da1a57ec005be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java @@ -126,7 +126,7 @@ public void testGetNMResourceInfoAutoDiscoveryDisabled() } @Test - public void testNodeGPUUtilization() + public void testAvgNodeGpuUtilization() throws Exception { GpuDiscoverer gpuDiscoverer = createNodeGPUUtilizationDiscoverer(); @@ -134,7 +134,7 @@ public void testNodeGPUUtilization() new GpuNodeResourceUpdateHandler(gpuDiscoverer, new Configuration()); Assert.assertEquals(0.5F, - gpuNodeResourceUpdateHandler.getNodeGpuUtilization(), 1e-6); + gpuNodeResourceUpdateHandler.getAvgNodeGpuUtilization(), 1e-6); } private GpuDiscoverer createNodeGPUUtilizationDiscoverer() From bfd47e6dd54e6e9b133fe3728dc36f22036ba237 Mon Sep 17 00:00:00 2001 From: Karthik Palanisamy Date: Sat, 1 May 2021 11:05:31 -0700 Subject: [PATCH 286/666] HDFS-15865. 
Interrupt DataStreamer thread if no ack (#2728) --- .../java/org/apache/hadoop/hdfs/DataStreamer.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index 96c86c3569b6b..e04268eddce79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -897,6 +897,8 @@ void waitForAckedSeqno(long seqno) throws IOException { try (TraceScope ignored = dfsClient.getTracer(). newScope("waitForAckedSeqno")) { LOG.debug("{} waiting for ack for: {}", this, seqno); + int dnodes = nodes != null ? nodes.length : 3; + int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes); long begin = Time.monotonicNow(); try { synchronized (dataQueue) { @@ -907,6 +909,16 @@ void waitForAckedSeqno(long seqno) throws IOException { } try { dataQueue.wait(1000); // when we receive an ack, we notify on + long duration = Time.monotonicNow() - begin; + if (duration > writeTimeout) { + LOG.error("No ack received, took {}ms (threshold={}ms). " + + "File being written: {}, block: {}, " + + "Write pipeline datanodes: {}.", + duration, writeTimeout, src, block, nodes); + throw new InterruptedIOException("No ack received after " + + duration / 1000 + "s and a timeout of " + + writeTimeout / 1000 + "s"); + } // dataQueue } catch (InterruptedException ie) { throw new InterruptedIOException( From a36582892f4c9f1ee6f4e8c9f3c909d72672e278 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Sat, 1 May 2021 19:10:32 -0700 Subject: [PATCH 287/666] HADOOP-17653. Do not use guava's Files.createTempDir(). (#2945) Reviewed-by: Steve Loughran Signed-off-by: Akira Ajisaka --- .../store/driver/impl/StateStoreFileImpl.java | 13 ++++++++++++- .../impl/TestInMemoryLevelDBAliasMapClient.java | 9 ++++++--- .../impl/TestLevelDbMockAliasMapClient.java | 7 +++++-- .../hadoop/yarn/service/client/ServiceClient.java | 7 +++++-- .../hadoop/yarn/csi/client/TestCsiClient.java | 7 +++++-- .../resourceplugin/com/nec/TestNECVEPlugin.java | 2 +- .../fpga/TestIntelFpgaOpenclPlugin.java | 2 +- .../TestDominantResourceFairnessPolicy.java | 2 +- 8 files changed, 36 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java index cedc784e39f15..9d2b1ab2fb73a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java @@ -88,7 +88,18 @@ protected String getRootDir() { if (this.rootDirectory == null) { String dir = getConf().get(FEDERATION_STORE_FILE_DIRECTORY); if (dir == null) { - File tempDir = Files.createTempDir(); + File tempDirBase = + new File(System.getProperty("java.io.tmpdir")); + File tempDir = null; + try { + tempDir = java.nio.file.Files.createTempDirectory( + tempDirBase.toPath(), System.currentTimeMillis() + "-").toFile(); + } catch (IOException e) { + // fallback to the base upon exception. 
+ LOG.debug("Unable to create a temporary directory. Fall back to " + + " the default system temp directory {}", tempDirBase, e); + tempDir = tempDirBase; + } dir = tempDir.getAbsolutePath(); LOG.warn("The root directory is not available, using {}", dir); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java index 8971730707bfb..1996f4455af5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java @@ -16,8 +16,6 @@ */ package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -28,7 +26,9 @@ import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer; import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap; import org.apache.hadoop.hdfs.server.common.FileRegion; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -44,6 +44,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Files; import java.util.List; import java.util.Optional; import java.util.Random; @@ -74,7 +75,9 @@ public void setUp() throws IOException { conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, "localhost:" + port); - tempDir = Files.createTempDir(); + File testDir = GenericTestUtils.getTestDir(); + tempDir = Files + .createTempDirectory(testDir.toPath(), "test").toFile(); File levelDBDir = new File(tempDir, BPID); levelDBDir.mkdirs(); conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java index 91e07cfa4c8e4..84b0d23fda5b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl; -import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -26,12 +25,14 @@ import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap; import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer; import org.apache.hadoop.hdfs.server.common.FileRegion; +import org.apache.hadoop.test.GenericTestUtils; import org.iq80.leveldb.DBException; import org.junit.After; import org.junit.Before; import 
org.junit.Test; import java.io.File; import java.io.IOException; +import java.nio.file.Files; import static org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType; import static org.mockito.Mockito.doThrow; @@ -60,7 +61,9 @@ public void setUp() throws IOException { conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, "localhost:" + port); - tempDir = Files.createTempDir(); + File testDir = GenericTestUtils.getTestDir(); + tempDir = Files + .createTempDirectory(testDir.toPath(), "test").toFile(); conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR, tempDir.getAbsolutePath()); levelDBAliasMapServer.setConf(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index 901b81fe14946..8d350158fffdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -27,7 +27,6 @@ import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.retry.RetryNTimes; -import org.apache.curator.shaded.com.google.common.io.Files; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -117,6 +116,7 @@ import java.net.URISyntaxException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.text.MessageFormat; import java.util.*; import java.util.concurrent.ConcurrentHashMap; @@ -1140,7 +1140,10 @@ private void addYarnSysFs(Path path, return; } String buffer = ServiceApiUtil.jsonSerDeser.toJson(app); - File tmpDir = Files.createTempDir(); + File testDir = + new File(System.getProperty("java.io.tmpdir")); + File tmpDir = Files.createTempDirectory( + testDir.toPath(), System.currentTimeMillis() + "-").toFile(); if (tmpDir.exists()) { String serviceJsonPath = tmpDir.getAbsolutePath() + "/app.json"; File localFile = new File(serviceJsonPath); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java index f025bc6d72fda..b60d5cdb86080 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java @@ -20,7 +20,7 @@ import csi.v0.Csi; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.thirdparty.com.google.common.io.Files; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Assume; @@ -30,6 +30,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Files; /** * Test class for CSI client. 
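A self-contained sketch of the JDK temp-directory idiom this commit substitutes for Guava's Files.createTempDir(); the prefix and the fallback to the base directory mirror the StateStoreFileImpl change earlier in the patch, while the class name is illustrative:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class TempDirSketch {
  public static void main(String[] args) {
    File base = new File(System.getProperty("java.io.tmpdir"));
    File tmpDir;
    try {
      // createTempDirectory appends a random suffix to the given prefix.
      tmpDir = Files.createTempDirectory(base.toPath(),
          System.currentTimeMillis() + "-").toFile();
    } catch (IOException e) {
      // Fall back to the base directory rather than failing outright.
      tmpDir = base;
    }
    System.out.println("Using temp dir: " + tmpDir.getAbsolutePath());
  }
}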
@@ -42,7 +43,9 @@ public class TestCsiClient { @BeforeClass public static void setUp() throws IOException { - testRoot = Files.createTempDir(); + File testDir = GenericTestUtils.getTestDir(); + testRoot = Files + .createTempDirectory(testDir.toPath(), "test").toFile(); File socketPath = new File(testRoot, "csi.sock"); FileUtils.forceMkdirParent(socketPath); domainSocket = "unix://" + socketPath.getAbsolutePath(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java index 06d1d2de6f909..58fc4c7dc1c47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java @@ -46,7 +46,7 @@ import org.apache.commons.compress.utils.Sets; import org.apache.commons.io.FileUtils; -import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.util.Shell.CommandExecutor; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java index 663df5e26cbcd..0b093953eef34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Map; -import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.Path; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java index c963e0dab63d2..197651a64c16c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java @@ -29,7 +29,7 @@ import java.util.Map; import java.util.TreeSet; -import org.apache.curator.shaded.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; From e6b4754fb62de2d8f16ff68072199ad4fb2b3b2d Mon Sep 17 00:00:00 2001 From: lfengnan Date: Sun, 2 May 2021 03:18:47 -0700 Subject: [PATCH 288/666] HDFS-15810. RBF: RBFMetrics's TotalCapacity out of bounds (#2910) Reviewed-by: Inigo Goiri Signed-off-by: Akira Ajisaka --- .../src/site/markdown/Metrics.md | 9 +++-- .../federation/metrics/FederationMBean.java | 26 ++++++++++++++ .../server/federation/metrics/RBFMetrics.java | 36 +++++++++++++++++-- .../main/webapps/router/federationhealth.html | 6 ++-- .../federation/metrics/TestMetricsBase.java | 11 ++++++ .../federation/metrics/TestRBFMetrics.java | 36 ++++++++++++++++++- 6 files changed, 115 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md index 8423b5351b11f..6cec030ee2557 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md @@ -527,9 +527,12 @@ RBFMetrics shows the metrics which are the aggregated values of sub-clusters' in | `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state | | `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state | | `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state | -| `TotalCapacity` | Current raw capacity of DataNodes in bytes | -| `UsedCapacity` | Current used capacity across all DataNodes in bytes | -| `RemainingCapacity` | Current remaining capacity in bytes | +| `TotalCapacity` | Current raw capacity of DataNodes in bytes (long primitive, may overflow) | +| `UsedCapacity` | Current used capacity across all DataNodes in bytes (long primitive, may overflow) | +| `RemainingCapacity` | Current remaining capacity in bytes (long primitive, may overflow) | +| `TotalCapacityBigInt` | Current raw capacity of DataNodes in bytes (using BigInteger) | +| `UsedCapacityBigInt` | Current used capacity across all DataNodes in bytes (using BigInteger) | +| `RemainingCapacityBigInt` | Current remaining capacity in bytes (using BigInteger) | | `NumOfMissingBlocks` | Current number of missing blocks | | `NumLiveNodes` | Number of datanodes which are currently live | | `NumDeadNodes` | Number of datanodes which are currently dead | diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index c06a2e08548ae..b9ea8709e90f9 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.federation.metrics; +import java.math.BigInteger; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -54,22 +56,46 @@ public interface FederationMBean { /** * Get the total capacity of the federated cluster. + * The number could overflow if too big. In that case use + * {@link #getTotalCapacityBigInt()} instead. * @return Total capacity of the federated cluster. */ long getTotalCapacity(); /** * Get the used capacity of the federated cluster. + * The number could overflow if too big. In that case use + * {@link #getUsedCapacityBigInt()} instead. * @return Used capacity of the federated cluster. */ long getUsedCapacity(); /** * Get the remaining capacity of the federated cluster. + * The number could overflow if too big. In that case use + * {@link #getRemainingCapacityBigInt()} instead. * @return Remaining capacity of the federated cluster. */ long getRemainingCapacity(); + /** + * Get the total capacity (big integer) of the federated cluster. + * @return Total capacity of the federated cluster. + */ + BigInteger getTotalCapacityBigInt(); + + /** + * Get the used capacity (big integer) of the federated cluster. + * @return Used capacity of the federated cluster. + */ + BigInteger getUsedCapacityBigInt(); + + /** + * Get the remaining capacity (big integer) of the federated cluster. + * @return Remaining capacity of the federated cluster. + */ + BigInteger getRemainingCapacityBigInt(); + /** * Get the total remote storage capacity mounted in the federated cluster. * @return Remote capacity of the federated cluster. 
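A small sketch of the overflow the new *BigInt accessors guard against: aggregating per-nameservice capacities as a long can wrap around, while a BigInteger sum stays exact (the two capacity values below are illustrative):

import java.math.BigInteger;

public class CapacityOverflowSketch {
  public static void main(String[] args) {
    // Two subclusters, each reporting a very large raw capacity.
    long ns0 = Long.MAX_VALUE;
    long ns1 = Long.MAX_VALUE;

    long aggregatedAsLong = ns0 + ns1;            // wraps around to a negative value
    BigInteger aggregatedAsBigInt =
        BigInteger.valueOf(ns0).add(BigInteger.valueOf(ns1));  // exact sum

    System.out.println("long sum:       " + aggregatedAsLong);
    System.out.println("BigInteger sum: " + aggregatedAsBigInt);
  }
}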
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java index cc5bf07408246..ec41a32716607 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.lang.reflect.Method; +import java.math.BigInteger; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -380,14 +381,29 @@ public long getRemainingCapacity() { return getNameserviceAggregatedLong(MembershipStats::getAvailableSpace); } + @Override + public long getUsedCapacity() { + return getTotalCapacity() - getRemainingCapacity(); + } + + @Override + public BigInteger getTotalCapacityBigInt() { + return getNameserviceAggregatedBigInt(MembershipStats::getTotalSpace); + } + + @Override + public BigInteger getRemainingCapacityBigInt() { + return getNameserviceAggregatedBigInt(MembershipStats::getAvailableSpace); + } + @Override public long getProvidedSpace() { return getNameserviceAggregatedLong(MembershipStats::getProvidedSpace); } @Override - public long getUsedCapacity() { - return getTotalCapacity() - getRemainingCapacity(); + public BigInteger getUsedCapacityBigInt() { + return getTotalCapacityBigInt().subtract(getRemainingCapacityBigInt()); } @Override @@ -783,6 +799,22 @@ private long getNameserviceAggregatedLong(ToLongFunction f) { } } + private BigInteger getNameserviceAggregatedBigInt( + ToLongFunction f) { + try { + List states = getActiveNamenodeRegistrations(); + BigInteger sum = BigInteger.valueOf(0); + for (MembershipState state : states) { + long lvalue = f.applyAsLong(state.getStats()); + sum = sum.add(BigInteger.valueOf(lvalue)); + } + return sum; + } catch (IOException e) { + LOG.error("Unable to extract metrics: {}", e.getMessage()); + return new BigInteger("0"); + } + } + /** * Fetches the most active namenode memberships for all known nameservices. * The fetched membership may not or may not be active. Excludes expired diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index 87d0e718bdcd3..eca395ff4b2a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -113,9 +113,9 @@

    Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Committed Non Heap Memory. Max Non Heap Memory is {@eq key=max value="-1" type="number"}<unbounded>{:else}{max|fmt_bytes}{/eq}.

    {/mem.NonHeapMemoryUsage} - - - + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java index 4759d05f820dc..b01e22006d776 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestMetricsBase.java @@ -259,4 +259,15 @@ private MembershipState createRegistration(String ns, String nn, assertTrue(response.getResult()); return record; } + + // refresh namenode registration for new attributes + public boolean refreshNamenodeRegistration(NamenodeHeartbeatRequest request) + throws IOException { + boolean result = membershipStore.namenodeHeartbeat(request).getResult(); + membershipStore.loadCache(true); + MembershipNamenodeResolver resolver = + (MembershipNamenodeResolver) router.getNamenodeResolver(); + resolver.loadCache(true); + return result; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java index 2c7edaa68e195..25473f8df9233 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestRBFMetrics.java @@ -19,11 +19,13 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getBean; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import java.io.IOException; +import java.math.BigInteger; import java.util.Iterator; import java.util.List; @@ -31,6 +33,7 @@ import org.apache.commons.collections.ListUtils; import org.apache.hadoop.hdfs.server.federation.router.Router; +import org.apache.hadoop.hdfs.server.federation.store.protocol.NamenodeHeartbeatRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState; import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; @@ -58,6 +61,7 @@ public void testClusterStatsJMX() FederationMBean federationBean = getBean(FEDERATION_BEAN, FederationMBean.class); validateClusterStatsFederationBean(federationBean); + testCapacity(federationBean); RouterMBean routerBean = getBean(ROUTER_BEAN, RouterMBean.class); validateClusterStatsRouterBean(routerBean); } @@ -348,4 +352,34 @@ private void validateClusterStatsRouterBean(RouterMBean bean) { assertTrue(bean.getHostAndPort().length() > 0); assertFalse(bean.isSecurityEnabled()); } + + private void testCapacity(FederationMBean bean) throws IOException { + List memberships = getActiveMemberships(); + assertTrue(memberships.size() > 1); + + BigInteger availableCapacity = BigInteger.valueOf(0); + BigInteger totalCapacity = BigInteger.valueOf(0); + BigInteger unitCapacity = BigInteger.valueOf(Long.MAX_VALUE); + for (MembershipState mock : memberships) { + MembershipStats stats = mock.getStats(); + 
stats.setTotalSpace(Long.MAX_VALUE); + stats.setAvailableSpace(Long.MAX_VALUE); + // reset stats to make the new value persistent + mock.setStats(stats); + // write back the new namenode information to state store + assertTrue(refreshNamenodeRegistration( + NamenodeHeartbeatRequest.newInstance(mock))); + totalCapacity = totalCapacity.add(unitCapacity); + availableCapacity = availableCapacity.add(unitCapacity); + } + + // for local cache update + assertEquals(totalCapacity, bean.getTotalCapacityBigInt()); + // not equal since overflow happened. + assertNotEquals(totalCapacity, BigInteger.valueOf(bean.getTotalCapacity())); + assertEquals(availableCapacity, bean.getRemainingCapacityBigInt()); + // not equal since overflow happened. + assertNotEquals(availableCapacity, + BigInteger.valueOf(bean.getRemainingCapacity())); + } } From 95610f0f8e2349e14c8db6eb5a06272f457053e6 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Tue, 4 May 2021 00:25:29 +0900 Subject: [PATCH 289/666] HDFS-15952. TestRouterRpcMultiDestination#testProxyGetTransactionID and testProxyVersionRequest are flaky. (#2970) --- .../federation/router/TestRouterRpc.java | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index c3e7f10c95a00..4f772cc4dc89c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.TEST_STRING; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -185,6 +186,7 @@ public int compare( private NamenodeProtocol routerNamenodeProtocol; /** NameNodeProtocol interface to the Namenode. */ private NamenodeProtocol nnNamenodeProtocol; + private NamenodeProtocol nnNamenodeProtocol1; /** Filesystem interface to the Router. */ private FileSystem routerFS; @@ -366,6 +368,11 @@ protected void setNamenode(NamenodeContext nn) NamenodeContext nn0 = cluster.getNamenode(ns0, null); this.nnNamenodeProtocol = NameNodeProxies.createProxy(nn0.getConf(), nn0.getFileSystem().getUri(), NamenodeProtocol.class).getProxy(); + // Namenode from the other namespace + String ns1 = cluster.getNameservices().get(1); + NamenodeContext nn1 = cluster.getNamenode(ns1, null); + this.nnNamenodeProtocol1 = NameNodeProxies.createProxy(nn1.getConf(), + nn1.getFileSystem().getUri(), NamenodeProtocol.class).getProxy(); } protected String getNs() { @@ -1302,11 +1309,14 @@ public void testProxyVersionRequest() throws Exception { // Check with default namespace specified. NamespaceInfo rVersion = routerNamenodeProtocol.versionRequest(); NamespaceInfo nnVersion = nnNamenodeProtocol.versionRequest(); + NamespaceInfo nnVersion1 = nnNamenodeProtocol1.versionRequest(); compareVersion(rVersion, nnVersion); // Check with default namespace unspecified. 
resolver.setDisableNamespace(true); - rVersion = routerNamenodeProtocol.versionRequest(); - compareVersion(rVersion, nnVersion); + // Verify the NamespaceInfo is of nn0 or nn1 + boolean isNN0 = + rVersion.getBlockPoolID().equals(nnVersion.getBlockPoolID()); + compareVersion(rVersion, isNN0 ? nnVersion : nnVersion1); } finally { resolver.setDisableNamespace(false); } @@ -1375,11 +1385,13 @@ public void testProxyGetTransactionID() throws IOException { // Check with default namespace specified. long routerTransactionID = routerNamenodeProtocol.getTransactionID(); long nnTransactionID = nnNamenodeProtocol.getTransactionID(); + long nnTransactionID1 = nnNamenodeProtocol1.getTransactionID(); assertEquals(nnTransactionID, routerTransactionID); // Check with default namespace unspecified. resolver.setDisableNamespace(true); + // Verify the transaction ID is of nn0 or nn1 routerTransactionID = routerNamenodeProtocol.getTransactionID(); - assertEquals(nnTransactionID, routerTransactionID); + assertThat(routerTransactionID).isIn(nnTransactionID, nnTransactionID1); } finally { resolver.setDisableNamespace(false); } From f5d333eebb022e0fb91f8aebe9f5ce204cf00521 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 3 May 2021 21:40:26 +0530 Subject: [PATCH 290/666] HADOOP-17676. Restrict imports from org.apache.curator.shaded (#2969) Signed-off-by: Akira Ajisaka --- pom.xml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/pom.xml b/pom.xml index 32090f86205d3..5056869cb2673 100644 --- a/pom.xml +++ b/pom.xml @@ -101,6 +101,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 2.4 3.0.2 3.0.0-M1 + 1.1.0 3.0.1 1.5 1.5 @@ -160,6 +161,33 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-thirdparty shaded instead of curator shaded + + org.apache.curator.shaded.** + + + + + + org.apache.maven.plugins From 36bf11945bfcd088de73e9392c5271499470c29a Mon Sep 17 00:00:00 2001 From: kishendas Date: Tue, 4 May 2021 01:20:56 -0700 Subject: [PATCH 291/666] HADOOP-17657: implement StreamCapabilities in SequenceFile.Writer and fall back to flush, if hflush is not supported (#2949) Co-authored-by: Kishen Das Reviewed-by: Steve Loughran --- .../org/apache/hadoop/io/SequenceFile.java | 19 +++++++++++++- .../apache/hadoop/io/TestSequenceFile.java | 26 +++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java index 3f4649f04dc9a..0581fb3f577a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java @@ -27,6 +27,7 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Options; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; @@ -834,7 +835,8 @@ public String toString() { } /** Write key/value pairs to a sequence-format file. 
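A hedged sketch of the flush fallback named in the HADOOP-17657 commit message above; the hasCapability and flush() members it relies on appear in the Writer changes that follow, and the output path is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf).getRaw();
    Path p = new Path("/tmp/flush-sketch.seq");      // illustrative path
    try (SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, p, LongWritable.class, Text.class)) {
      writer.append(new LongWritable(1L), new Text("value"));
      // Probe the writer before forcing data out; fall back when hflush
      // is not supported by the underlying stream.
      if (writer.hasCapability(StreamCapabilities.HFLUSH)) {
        writer.hflush();
      } else {
        writer.flush();
      }
    }
  }
}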
*/ - public static class Writer implements java.io.Closeable, Syncable { + public static class Writer implements java.io.Closeable, Syncable, + Flushable, StreamCapabilities { private Configuration conf; FSDataOutputStream out; boolean ownOutputStream = true; @@ -1367,6 +1369,21 @@ public void hflush() throws IOException { out.hflush(); } } + + @Override + public void flush() throws IOException { + if (out != null) { + out.flush(); + } + } + + @Override + public boolean hasCapability(String capability) { + if (out !=null && capability != null) { + return out.hasCapability(capability); + } + return false; + } /** Returns the configuration of this file. */ Configuration getConf() { return conf; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java index cf64bbc0f9457..8b604d9c7a44b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java @@ -30,6 +30,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.conf.*; +import org.assertj.core.api.Assertions; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -730,6 +731,31 @@ public void testSerializationAvailability() throws IOException { } } + @Test + public void testSequenceFileWriter() throws Exception { + Configuration conf = new Configuration(); + // This test only works with Raw File System and not Local File System + FileSystem fs = FileSystem.getLocal(conf).getRaw(); + Path p = new Path(GenericTestUtils + .getTempPath("testSequenceFileWriter.seq")); + try(SequenceFile.Writer writer = SequenceFile.createWriter( + fs, conf, p, LongWritable.class, Text.class)) { + Assertions.assertThat(writer.hasCapability + (StreamCapabilities.HSYNC)).isEqualTo(true); + Assertions.assertThat(writer.hasCapability( + StreamCapabilities.HFLUSH)).isEqualTo(true); + LongWritable key = new LongWritable(); + key.set(1); + Text value = new Text(); + value.set("somevalue"); + writer.append(key, value); + writer.flush(); + writer.hflush(); + writer.hsync(); + Assertions.assertThat(fs.getFileStatus(p).getLen()).isGreaterThan(0); + } + } + /** For debugging and testing. */ public static void main(String[] args) throws Exception { int count = 1024 * 1024; From e5efd51de8fa0515cd76c3725dbaada05361c40f Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 4 May 2021 14:01:23 +0530 Subject: [PATCH 292/666] HDFS-15982. 
Deleted data using HTTP API should be saved to the trash (#2927) Reviewed-by: Siyao Meng --- .../web/resources/DeleteSkipTrashParam.java | 50 +++++++++++++++ .../fs/http/client/HttpFSFileSystem.java | 1 + .../hadoop/fs/http/server/FSOperations.java | 28 ++++++++- .../http/server/HttpFSParametersProvider.java | 22 ++++++- .../hadoop/fs/http/server/HttpFSServer.java | 9 ++- .../fs/http/server/TestHttpFSServer.java | 62 +++++++++++++++++++ .../src/main/webapps/router/explorer.html | 23 ++++++- .../src/main/webapps/router/explorer.js | 50 +++++++++++---- .../web/resources/NamenodeWebHdfsMethods.java | 61 +++++++++++++----- .../src/main/webapps/hdfs/explorer.html | 23 ++++++- .../src/main/webapps/hdfs/explorer.js | 46 ++++++++++---- .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 6 +- .../apache/hadoop/hdfs/web/TestWebHDFS.java | 35 ++++++++++- 13 files changed, 363 insertions(+), 53 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java new file mode 100644 index 0000000000000..5ca9d69d7c870 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +/** + * SkipTrash param to be used by DELETE query. + */ +public class DeleteSkipTrashParam extends BooleanParam { + + public static final String NAME = "skiptrash"; + public static final String DEFAULT = FALSE; + + private static final Domain DOMAIN = new Domain(NAME); + + /** + * Constructor. + * @param value the parameter value. + */ + public DeleteSkipTrashParam(final Boolean value) { + super(DOMAIN, value); + } + + /** + * Constructor. + * @param str a string representation of the parameter value. 
+ */ + public DeleteSkipTrashParam(final String str) { + this(DOMAIN.parse(str)); + } + + @Override + public String getName() { + return NAME; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 2866044badcd2..3cf7dad1dad31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -122,6 +122,7 @@ public class HttpFSFileSystem extends FileSystem public static final String ACLSPEC_PARAM = "aclspec"; public static final String DESTINATION_PARAM = "destination"; public static final String RECURSIVE_PARAM = "recursive"; + public static final String SKIP_TRASH_PARAM = "skiptrash"; public static final String SOURCES_PARAM = "sources"; public static final String OWNER_PARAM = "owner"; public static final String GROUP_PARAM = "group"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java index e272cdc71b686..a092db09eed08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.http.client.HttpFSFileSystem; @@ -53,6 +54,8 @@ import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.apache.hadoop.fs.permission.FsCreateModes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; @@ -75,6 +78,8 @@ @InterfaceAudience.Private public final class FSOperations { + private static final Logger LOG = LoggerFactory.getLogger(FSOperations.class); + private static int bufferSize = 4096; private FSOperations() { @@ -717,18 +722,22 @@ public static long copyBytes(InputStream in, OutputStream out, long count) */ @InterfaceAudience.Private public static class FSDelete implements FileSystemAccess.FileSystemExecutor { - private Path path; - private boolean recursive; + private final Path path; + private final boolean recursive; + private final boolean skipTrash; /** * Creates a Delete executor. * * @param path path to delete. * @param recursive if the delete should be recursive or not. + * @param skipTrash if the file must be deleted and not kept in trash + * regardless of fs.trash.interval config value. 
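A minimal sketch of the trash-first delete path this patch introduces when skiptrash is not set: the path is first moved under the caller's trash directory and a hard delete is attempted only if that move fails. The path and trash interval below are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashFirstDeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Trash must be enabled (interval in minutes); 60 is illustrative.
    conf.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60);
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.txt");        // illustrative path

    boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, conf);
    if (!movedToTrash) {
      // Same fallback as the shell: attempt a plain delete.
      fs.delete(path, false);
    }
  }
}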
*/ - public FSDelete(String path, boolean recursive) { + public FSDelete(String path, boolean recursive, boolean skipTrash) { this.path = new Path(path); this.recursive = recursive; + this.skipTrash = skipTrash; } /** @@ -743,6 +752,19 @@ public FSDelete(String path, boolean recursive) { */ @Override public JSONObject execute(FileSystem fs) throws IOException { + if (!skipTrash) { + boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, + fs.getConf()); + if (movedToTrash) { + HttpFSServerWebApp.getMetrics().incrOpsDelete(); + return toJSON( + StringUtils.toLowerCase(HttpFSFileSystem.DELETE_JSON), true); + } + // Same is the behavior with Delete shell command. + // If moveToAppropriateTrash() returns false, file deletion + // is attempted rather than throwing Error. + LOG.debug("Could not move {} to Trash, attempting removal", path); + } boolean deleted = fs.delete(path, recursive); HttpFSServerWebApp.get().getMetrics().incrOpsDelete(); return toJSON( diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java index f6c84dcae4e07..b810ec6690372 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java @@ -81,7 +81,8 @@ public class HttpFSParametersProvider extends ParametersProvider { new Class[]{ReplicationParam.class}); PARAMS_DEF.put(Operation.SETTIMES, new Class[]{ModifiedTimeParam.class, AccessTimeParam.class}); - PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class}); + PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class, + DeleteSkipTrashParam.class}); PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class}); PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{}); PARAMS_DEF.put(Operation.MODIFYACLENTRIES, @@ -242,6 +243,25 @@ public RecursiveParam() { } } + /** + * Class for delete's skipTrash parameter. + */ + @InterfaceAudience.Private + public static class DeleteSkipTrashParam extends BooleanParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.SKIP_TRASH_PARAM; + + /** + * Constructor. + */ + public DeleteSkipTrashParam() { + super(NAME, false); + } + } + /** * Class for filter parameter. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index d0d76d6289912..e3254d5bd9eff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam; +import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DeleteSkipTrashParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ECPolicyParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam; @@ -548,9 +549,13 @@ public Response delete(@PathParam("path") String path, case DELETE: { Boolean recursive = params.get(RecursiveParam.NAME, RecursiveParam.class); - AUDIT_LOG.info("[{}] recursive [{}]", path, recursive); + Boolean skipTrashParam = params.get(DeleteSkipTrashParam.NAME, + DeleteSkipTrashParam.class); + boolean skipTrash = skipTrashParam != null && skipTrashParam; + AUDIT_LOG.info("[{}] recursive [{}] skipTrash [{}]", path, recursive, + skipTrash); FSOperations.FSDelete command = - new FSOperations.FSDelete(path, recursive); + new FSOperations.FSDelete(path, recursive, skipTrash); JSONObject json = fsExecute(user, command); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index 6aa8aa346ef9b..cddf95e22129f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs.http.server; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -537,6 +538,36 @@ private void createWithHttp(String filename, String perms, Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); } + private void deleteWithHttp(String filename, String perms, + String unmaskedPerms, Boolean skipTrash) throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + // Remove leading / from filename + if (filename.charAt(0) == '/') { + filename = filename.substring(1); + } + String pathOps; + if (perms == null) { + pathOps = MessageFormat.format("/webhdfs/v1/{0}?user.name={1}&op=DELETE", + filename, user); + } else { + pathOps = MessageFormat.format( + "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=DELETE", + filename, user, perms); + } + if (unmaskedPerms != null) { + pathOps = pathOps + "&unmaskedpermission=" + unmaskedPerms; + } + if (skipTrash != null) { + pathOps = pathOps + "&skiptrash=" + skipTrash; + } + URL url = new URL(TestJettyHelper.getJettyURL(), 
pathOps); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.addRequestProperty("Content-Type", "application/octet-stream"); + conn.setRequestMethod("DELETE"); + conn.connect(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + } + /** * Talks to the http interface to create a directory. * @@ -782,6 +813,37 @@ public void testPerms() throws Exception { Assert.assertTrue("321".equals(getPerms(statusJson))); } + /** + * Validate create and delete calls. + */ + @Test + @TestDir + @TestJetty + @TestHdfs + public void testCreateDelete() throws Exception { + final String dir1 = "/testCreateDelete1"; + final String path1 = dir1 + "/file1"; + final String dir2 = "/testCreateDelete2"; + final String path2 = dir2 + "/file2"; + + createHttpFSServer(false, false); + final Configuration conf = HttpFSServerWebApp.get() + .get(FileSystemAccess.class).getFileSystemConfiguration(); + conf.setLong(FS_TRASH_INTERVAL_KEY, 5); + writeConf(conf, "hdfs-site.xml"); + + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + fs.mkdirs(new Path(dir1)); + + createWithHttp(path1, null); + deleteWithHttp(path1, null, null, null); + + fs.mkdirs(new Path(dir2)); + + createWithHttp(path2, null); + deleteWithHttp(path2, null, null, true); + } + /** * Validate XAttr get/set/remove calls. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html index 80b38e7165f78..3150d87bdce62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html @@ -165,11 +165,32 @@ +
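(Illustration only, not part of the patch: the explorer change above adds a skip-trash confirmation dialog, and the same skip-trash delete can be issued directly against the new WebHDFS parameter. The host, port, path and user below are placeholders, not values taken from this patch.)

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SkipTrashDeleteSketch {
      public static void main(String[] args) throws Exception {
        // Placeholders: adjust host, port, path and user.name for a real cluster.
        URL url = new URL("http://namenode.example.com:9870/webhdfs/v1/tmp/file1"
            + "?op=DELETE&recursive=true&skiptrash=true&user.name=hdfs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("DELETE");
        conn.connect();
        // The server answers with a boolean JSON object, e.g. {"boolean": true}.
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }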
    diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js index cb16eac7b1221..0895eb967a6f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js @@ -82,23 +82,47 @@ function delete_path(inode_name, absolute_file_path) { $('#delete-modal-title').text("Delete - " + inode_name); $('#delete-prompt').text("Are you sure you want to delete " + inode_name - + " ?"); - - $('#delete-button').click(function() { + + " ?"); + $('#delete-trash-modal-title').text("Skip Trash - " + inode_name); + $('#delete-trash-prompt').text("Skipping Trash might delete file forever." + + " Do you want to skip-trash " + inode_name + + " ? (default behaviour - No)"); + + $('#skip-trash-button').click(function () { + // DELETE /webhdfs/v1/?op=DELETE&recursive=&skiptrash=true + var url = '/webhdfs/v1' + encode_path(absolute_file_path) + + '?op=DELETE' + '&recursive=true&skiptrash=true'; + $.ajax(url, + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#skip-trash-button').button('reset'); + }); + }) + $('#trash-button').click(function () { // DELETE /webhdfs/v1/?op=DELETE&recursive= var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true'; - + '?op=DELETE' + '&recursive=true'; $.ajax(url, - { type: 'DELETE' - }).done(function(data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function() { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - }); + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#trash-button').button('reset'); + }); }) + $('#delete-modal').modal(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index e7bf32fcc05f0..2703349db8704 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -55,8 +55,10 @@ import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -119,6 +121,9 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.ResourceFilters; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; + /** Web-hdfs NameNode implementation. 
*/ @Path("") @ResourceFilters(ParamFilter.class) @@ -1503,10 +1508,13 @@ public Response deleteRoot( @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) - final SnapshotNameParam snapshotName + final SnapshotNameParam snapshotName, + @QueryParam(DeleteSkipTrashParam.NAME) + @DefaultValue(DeleteSkipTrashParam.DEFAULT) + final DeleteSkipTrashParam skiptrash ) throws IOException, InterruptedException { return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive, - snapshotName); + snapshotName, skiptrash); } /** Handle HTTP DELETE request. */ @@ -1527,34 +1535,53 @@ public Response delete( @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) - final SnapshotNameParam snapshotName + final SnapshotNameParam snapshotName, + @QueryParam(DeleteSkipTrashParam.NAME) + @DefaultValue(DeleteSkipTrashParam.DEFAULT) + final DeleteSkipTrashParam skiptrash ) throws IOException, InterruptedException { - init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName); + init(ugi, delegation, username, doAsUser, path, op, recursive, + snapshotName, skiptrash); - return doAs(ugi, new PrivilegedExceptionAction() { - @Override - public Response run() throws IOException { - return delete(ugi, delegation, username, doAsUser, - path.getAbsolutePath(), op, recursive, snapshotName); - } - }); + return doAs(ugi, () -> delete( + path.getAbsolutePath(), op, recursive, snapshotName, skiptrash)); } protected Response delete( - final UserGroupInformation ugi, - final DelegationParam delegation, - final UserParam username, - final DoAsParam doAsUser, final String fullpath, final DeleteOpParam op, final RecursiveParam recursive, - final SnapshotNameParam snapshotName - ) throws IOException { + final SnapshotNameParam snapshotName, + final DeleteSkipTrashParam skipTrash) throws IOException { final ClientProtocol cp = getRpcClientProtocol(); switch(op.getValue()) { case DELETE: { + Configuration conf = + (Configuration) context.getAttribute(JspHelper.CURRENT_CONF); + long trashInterval = + conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); + if (trashInterval > 0 && !skipTrash.getValue()) { + LOG.info("{} is {} , trying to archive {} instead of removing", + FS_TRASH_INTERVAL_KEY, trashInterval, fullpath); + org.apache.hadoop.fs.Path path = + new org.apache.hadoop.fs.Path(fullpath); + Configuration clonedConf = new Configuration(conf); + // To avoid caching FS objects and prevent OOM issues + clonedConf.set("fs.hdfs.impl.disable.cache", "true"); + FileSystem fs = FileSystem.get(clonedConf); + boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, + clonedConf); + if (movedToTrash) { + final String js = JsonUtil.toJsonString("boolean", true); + return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } + // Same is the behavior with Delete shell command. + // If moveToAppropriateTrash() returns false, file deletion + // is attempted rather than throwing Error. 
+ LOG.debug("Could not move {} to Trash, attempting removal", fullpath); + } final boolean b = cp.delete(fullpath, recursive.getValue()); final String js = JsonUtil.toJsonString("boolean", b); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html index 3f0509a229700..eb7ca3504891a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html @@ -166,11 +166,32 @@
    +
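(Illustration only, not part of the patch: the NamenodeWebHdfsMethods change above only archives a path when trash is enabled. A minimal sketch, assuming the standard Hadoop Configuration API, of enabling trash so that DELETE requests without skiptrash=true are moved to the user's .Trash; in practice this is normally configured in core-site.xml rather than in code.)

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;

    public class EnableTrashSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // fs.trash.interval is in minutes; any value greater than 0 makes the DELETE
        // handler try Trash.moveToAppropriateTrash() before a hard delete.
        conf.setLong(FS_TRASH_INTERVAL_KEY, 60);
        System.out.println(FS_TRASH_INTERVAL_KEY + " = " + conf.get(FS_TRASH_INTERVAL_KEY));
      }
    }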
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index ea8b0accbde6c..d9371bb9288e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -83,22 +83,46 @@ $('#delete-modal-title').text("Delete - " + inode_name); $('#delete-prompt').text("Are you sure you want to delete " + inode_name + " ?"); + $('#delete-trash-modal-title').text("Skip Trash - " + inode_name); + $('#delete-trash-prompt').text("Skipping Trash might delete file forever." + + " Do you want to skip-trash " + inode_name + + " ? (default behaviour - No)"); - $('#delete-button').click(function() { + $('#skip-trash-button').click(function () { + // DELETE /webhdfs/v1/?op=DELETE&recursive=&skiptrash=true + var url = '/webhdfs/v1' + encode_path(absolute_file_path) + + '?op=DELETE' + '&recursive=true&skiptrash=true'; + $.ajax(url, + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#skip-trash-button').button('reset'); + }); + }) + $('#trash-button').click(function () { // DELETE /webhdfs/v1/?op=DELETE&recursive= var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true'; - + '?op=DELETE' + '&recursive=true'; $.ajax(url, - { type: 'DELETE' - }).done(function(data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function() { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - }); + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#trash-button').button('reset'); + }); }) + $('#delete-modal').modal(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index 3fb68d40e929b..07891ae64b3e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -462,7 +462,11 @@ See also: [`destination`](#Destination), [FileSystem](../../api/org/apache/hadoo * Submit a HTTP DELETE request. curl -i -X DELETE "http://:/webhdfs/v1/?op=DELETE - [&recursive=]" + [&recursive=][&skiptrash=]" + + Default values of queryparams if not provided: + 1. recursive: false + 2. 
skiptrash: false The client receives a response with a [`boolean` JSON object](#Boolean_JSON_Schema): diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 68087c4c4ed6e..698c323e9a970 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.web; +import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; @@ -58,6 +60,8 @@ import java.util.NoSuchElementException; import java.util.Random; +import org.apache.hadoop.hdfs.web.resources.DeleteSkipTrashParam; +import org.apache.hadoop.hdfs.web.resources.RecursiveParam; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.QuotaUsage; @@ -1559,8 +1563,12 @@ private void checkResponseContainsLocation(URL url, String TYPE) HttpURLConnection.HTTP_OK, conn.getResponseCode()); JSONObject responseJson = new JSONObject(response); - Assert.assertTrue("Response didn't give us a location. " + response, - responseJson.has("Location")); + if (!TYPE.equals("DELETE")) { + Assert.assertTrue("Response didn't give us a location. 
" + response, + responseJson.has("Location")); + } else { + Assert.assertTrue(responseJson.getBoolean("boolean")); + } //Test that the DN allows CORS on Create if(TYPE.equals("CREATE")) { @@ -1572,14 +1580,15 @@ private void checkResponseContainsLocation(URL url, String TYPE) } } - @Test /** * Test that when "&noredirect=true" is added to operations CREATE, APPEND, * OPEN, and GETFILECHECKSUM the response (which is usually a 307 temporary * redirect) is a 200 with JSON that contains the redirected location */ + @Test public void testWebHdfsNoRedirect() throws Exception { final Configuration conf = WebHdfsTestUtil.createConf(); + conf.setLong(FS_TRASH_INTERVAL_KEY, 5); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); LOG.info("Started cluster"); InetSocketAddress addr = cluster.getNameNode().getHttpAddress(); @@ -1618,6 +1627,26 @@ public void testWebHdfsNoRedirect() throws Exception { + Param.toSortedString("&", new NoRedirectParam(true))); LOG.info("Sending append request " + url); checkResponseContainsLocation(url, "POST"); + + // setup some permission to allow moving file to .Trash location + cluster.getFileSystem().setPermission(new Path("/testWebHdfsNoRedirect"), + new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); + Path userDir = new Path(FileSystem.USER_HOME_PREFIX); + Path trashDir = new Path(FileSystem.USER_HOME_PREFIX, DEFAULT_HADOOP_HTTP_STATIC_USER); + Path trashPath = new Path(FileSystem.USER_HOME_PREFIX, + new Path(DEFAULT_HADOOP_HTTP_STATIC_USER, FileSystem.TRASH_PREFIX)); + cluster.getFileSystem().mkdirs(userDir, FsPermission.getDirDefault()); + cluster.getFileSystem().mkdir(trashDir, FsPermission.getDirDefault()); + cluster.getFileSystem().mkdir(trashPath, FsPermission.getDirDefault()); + cluster.getFileSystem().setOwner(trashPath, DEFAULT_HADOOP_HTTP_STATIC_USER, HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT); + cluster.getFileSystem().setPermission(new Path("/"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); + + url = new URL("http", addr.getHostString(), addr.getPort(), + WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect" + "?op=DELETE" + + Param.toSortedString("&", new RecursiveParam(true)) + + Param.toSortedString("&", new DeleteSkipTrashParam(false))); + LOG.info("Sending append request " + url); + checkResponseContainsLocation(url, "DELETE"); } @Test From 5dcd1121ef2f15a53d2b39bad7de9a6f9f10f8dc Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 4 May 2021 10:49:52 +0100 Subject: [PATCH 293/666] HADOOP-16742. NullPointerException in S3A MultiObjectDeleteSupport Contributed by Tor Arvid Lund. 
Change-Id: Iadfe9b2f355cf373031075bfbe681705a2c65bdc --- .../org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java index 0a8338934c9ef..d2b3899a69104 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java @@ -106,7 +106,7 @@ public static IOException translateDeleteException( error.getMessage()); LOG.warn(item); result.append(item); - if (exitCode.isEmpty() || ACCESS_DENIED.equals(code)) { + if (exitCode == null || exitCode.isEmpty() || ACCESS_DENIED.equals(code)) { exitCode = code; } } From acc5f2f804dcd85e707978a2bf3b437b5bb5d429 Mon Sep 17 00:00:00 2001 From: Istvan Fajth Date: Tue, 4 May 2021 12:33:59 +0200 Subject: [PATCH 294/666] HADOOP-17675. LdapGroupsMapping$LdapSslSocketFactory ClassNotFoundException (#2965) --- .../hadoop/security/LdapGroupsMapping.java | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java index 10483a2c56571..17934add81413 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java @@ -643,7 +643,29 @@ private DirContext getDirContext() throws NamingException { env.put("com.sun.jndi.ldap.read.timeout", conf.get(READ_TIMEOUT, String.valueOf(READ_TIMEOUT_DEFAULT))); - ctx = new InitialDirContext(env); + // See HADOOP-17675 for details TLDR: + // From a native thread the thread's context classloader is null. + // jndi internally in the InitialDirContext specifies the context + // classloader for Class.forName, and as it is null, jndi will use the + // bootstrap classloader in this case to laod the socket factory + // implementation. + // BUT + // Bootstrap classloader does not have it in its classpath, so throws a + // ClassNotFoundException. + // This affects Impala for example when it uses LdapGroupsMapping. + ClassLoader currentContextLoader = + Thread.currentThread().getContextClassLoader(); + if (currentContextLoader == null) { + try { + Thread.currentThread().setContextClassLoader( + this.getClass().getClassLoader()); + ctx = new InitialDirContext(env); + } finally { + Thread.currentThread().setContextClassLoader(null); + } + } else { + ctx = new InitialDirContext(env); + } } return ctx; } From 8a5eeef20015e39c91295c5580dca2b774f1c700 Mon Sep 17 00:00:00 2001 From: zhuqi Date: Wed, 5 May 2021 23:32:13 +0800 Subject: [PATCH 295/666] HDFS-15934. Make DirectoryScanner reconcile blocks batch size and interval between batch configurable. Contributed by Qi Zhu. 
(#2833) Signed-off-by: Ayush Saxena --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 8 ++++ .../server/datanode/DirectoryScanner.java | 44 +++++++++++++++++-- .../src/main/resources/hdfs-default.xml | 12 +++++ 3 files changed, 60 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 51fc564a97584..77355be0c2664 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -836,6 +836,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT = 21600; public static final String DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY = "dfs.datanode.directoryscan.threads"; public static final int DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT = 1; + public static final String DFS_DATANODE_RECONCILE_BLOCKS_BATCH_SIZE = + "dfs.datanode.reconcile.blocks.batch.size"; + public static final int + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_SIZE_DEFAULT = 1000; + public static final String DFS_DATANODE_RECONCILE_BLOCKS_BATCH_INTERVAL + = "dfs.datanode.reconcile.blocks.batch.interval"; + public static final long + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_INTERVAL_DEFAULT = 2000; public static final String DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY = "dfs.datanode.disk.check.min.gap"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index a3bceec9a96a7..7a8569d39008e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -65,7 +65,8 @@ public class DirectoryScanner implements Runnable { LoggerFactory.getLogger(DirectoryScanner.class); private static final int DEFAULT_MAP_SIZE = 32768; - private static final int RECONCILE_BLOCKS_BATCH_SIZE = 1000; + private final int reconcileBlocksBatchSize; + private final long reconcileBlocksBatchInterval; private final FsDatasetSpi dataset; private final ExecutorService reportCompileThreadPool; private final ScheduledExecutorService masterThread; @@ -315,6 +316,41 @@ public DirectoryScanner(FsDatasetSpi dataset, Configuration conf) { masterThread = new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory()); + + int reconcileBatchSize = + conf.getInt(DFSConfigKeys. + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_SIZE, + DFSConfigKeys. + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_SIZE_DEFAULT); + + if (reconcileBatchSize <= 0) { + LOG.warn("Invalid value configured for " + + "dfs.datanode.reconcile.blocks.batch.size, " + + "should be greater than 0, Using default."); + reconcileBatchSize = + DFSConfigKeys. + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_SIZE_DEFAULT; + } + + reconcileBlocksBatchSize = reconcileBatchSize; + + long reconcileBatchInterval = + conf.getTimeDuration(DFSConfigKeys. + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_INTERVAL, + DFSConfigKeys. 
+ DFS_DATANODE_RECONCILE_BLOCKS_BATCH_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + + if (reconcileBatchInterval <= 0) { + LOG.warn("Invalid value configured for " + + "dfs.datanode.reconcile.blocks.batch.interval, " + + "should be greater than 0, Using default."); + reconcileBatchInterval = + DFSConfigKeys. + DFS_DATANODE_RECONCILE_BLOCKS_BATCH_INTERVAL_DEFAULT; + } + + reconcileBlocksBatchInterval = reconcileBatchInterval; } /** @@ -428,16 +464,16 @@ public void reconcile() throws IOException { LOG.debug("reconcile start DirectoryScanning"); scan(); - // HDFS-14476: run checkAndUpadte with batch to avoid holding the lock too + // HDFS-14476: run checkAndUpdate with batch to avoid holding the lock too // long int loopCount = 0; synchronized (diffs) { for (final Map.Entry entry : diffs.getEntries()) { dataset.checkAndUpdate(entry.getKey(), entry.getValue()); - if (loopCount % RECONCILE_BLOCKS_BATCH_SIZE == 0) { + if (loopCount % reconcileBlocksBatchSize == 0) { try { - Thread.sleep(2000); + Thread.sleep(reconcileBlocksBatchInterval); } catch (InterruptedException e) { // do nothing } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index a30889bdd0042..6e8e36e1cf0dc 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -873,6 +873,18 @@ + + dfs.datanode.reconcile.blocks.batch.size + 1000 + Setting this to define reconcile batch size. + + + + dfs.datanode.reconcile.blocks.batch.interval + 2000 + Setting this to define interval between batches. + + dfs.heartbeat.interval 3s From 83d8e8c8ba031fded58e61acbecbbc24cd2c10ed Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 6 May 2021 01:22:02 +0530 Subject: [PATCH 296/666] HADOOP-11616. Remove workaround for Curator's ChildReaper requiring Guava 15+ (#2973) Reviewed-by: Wei-Chiu Chuang Signed-off-by: Akira Ajisaka --- .../hadoop/util/curator/ChildReaper.java | 234 ------------------ .../hadoop/util/curator/TestChildReaper.java | 209 ---------------- 2 files changed, 443 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestChildReaper.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java deleted file mode 100644 index e125dbfbd0abb..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java +++ /dev/null @@ -1,234 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hadoop.util.curator; - -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.curator.framework.recipes.locks.Reaper; -import org.apache.curator.utils.CloseableUtils; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.utils.CloseableScheduledExecutorService; -import org.apache.curator.utils.ThreadUtils; -import org.apache.curator.utils.ZKPaths; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.zookeeper.data.Stat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import java.io.Closeable; -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.apache.curator.utils.PathUtils; - -/** - * This is a copy of Curator 2.7.1's ChildReaper class, modified to work with - * Guava 11.0.2. The problem is the 'paths' Collection, which calls Guava's - * Sets.newConcurrentHashSet(), which was added in Guava 15.0. - *

    - * Utility to reap empty child nodes of a parent node. Periodically calls getChildren on - * the node and adds empty nodes to an internally managed {@link Reaper} - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class ChildReaper implements Closeable -{ - private final Logger log = LoggerFactory.getLogger(getClass()); - private final Reaper reaper; - private final AtomicReference state = new AtomicReference(State.LATENT); - private final CuratorFramework client; - private final Collection paths = newConcurrentHashSet(); - private final Reaper.Mode mode; - private final CloseableScheduledExecutorService executor; - private final int reapingThresholdMs; - - private volatile Future task; - - // This is copied from Curator's Reaper class - static final int DEFAULT_REAPING_THRESHOLD_MS = (int)TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES); - - // This is copied from Guava - /** - * Creates a thread-safe set backed by a hash map. The set is backed by a - * {@link ConcurrentHashMap} instance, and thus carries the same concurrency - * guarantees. - * - *

    Unlike {@code HashSet}, this class does NOT allow {@code null} to be - * used as an element. The set is serializable. - * - * @return a new, empty thread-safe {@code Set} - * @since 15.0 - */ - public static Set newConcurrentHashSet() { - return Collections.newSetFromMap(new ConcurrentHashMap()); - } - - private enum State - { - LATENT, - STARTED, - CLOSED - } - - /** - * @param client the client - * @param path path to reap children from - * @param mode reaping mode - */ - public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode) - { - this(client, path, mode, newExecutorService(), DEFAULT_REAPING_THRESHOLD_MS, null); - } - - /** - * @param client the client - * @param path path to reap children from - * @param reapingThresholdMs threshold in milliseconds that determines that a path can be deleted - * @param mode reaping mode - */ - public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode, int reapingThresholdMs) - { - this(client, path, mode, newExecutorService(), reapingThresholdMs, null); - } - - /** - * @param client the client - * @param path path to reap children from - * @param executor executor to use for background tasks - * @param reapingThresholdMs threshold in milliseconds that determines that a path can be deleted - * @param mode reaping mode - */ - public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode, ScheduledExecutorService executor, int reapingThresholdMs) - { - this(client, path, mode, executor, reapingThresholdMs, null); - } - - /** - * @param client the client - * @param path path to reap children from - * @param executor executor to use for background tasks - * @param reapingThresholdMs threshold in milliseconds that determines that a path can be deleted - * @param mode reaping mode - * @param leaderPath if not null, uses a leader selection so that only 1 reaper is active in the cluster - */ - public ChildReaper(CuratorFramework client, String path, Reaper.Mode mode, ScheduledExecutorService executor, int reapingThresholdMs, String leaderPath) - { - this.client = client; - this.mode = mode; - this.executor = new CloseableScheduledExecutorService(executor); - this.reapingThresholdMs = reapingThresholdMs; - this.reaper = new Reaper(client, executor, reapingThresholdMs, leaderPath); - addPath(path); - } - - /** - * The reaper must be started - * - * @throws Exception errors - */ - public void start() throws Exception - { - Preconditions.checkState(state.compareAndSet(State.LATENT, State.STARTED), "Cannot be started more than once"); - - task = executor.scheduleWithFixedDelay - ( - new Runnable() - { - @Override - public void run() - { - doWork(); - } - }, - reapingThresholdMs, - reapingThresholdMs, - TimeUnit.MILLISECONDS - ); - - reaper.start(); - } - - @Override - public void close() throws IOException - { - if ( state.compareAndSet(State.STARTED, State.CLOSED) ) - { - CloseableUtils.closeQuietly(reaper); - task.cancel(true); - } - } - - /** - * Add a path to reap children from - * - * @param path the path - * @return this for chaining - */ - public ChildReaper addPath(String path) - { - paths.add(PathUtils.validatePath(path)); - return this; - } - - /** - * Remove a path from reaping - * - * @param path the path - * @return true if the path existed and was removed - */ - public boolean removePath(String path) - { - return paths.remove(PathUtils.validatePath(path)); - } - - private static ScheduledExecutorService newExecutorService() - { - return ThreadUtils.newFixedThreadScheduledPool(2, "ChildReaper"); - } - 
- private void doWork() - { - for ( String path : paths ) - { - try - { - List children = client.getChildren().forPath(path); - for ( String name : children ) - { - String thisPath = ZKPaths.makePath(path, name); - Stat stat = client.checkExists().forPath(thisPath); - if ( (stat != null) && (stat.getNumChildren() == 0) ) - { - reaper.addPath(thisPath, mode); - } - } - } - catch ( Exception e ) - { - log.error("Could not get children for path: " + path, e); - } - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestChildReaper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestChildReaper.java deleted file mode 100644 index 960471841948c..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestChildReaper.java +++ /dev/null @@ -1,209 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.util.curator; - -import org.apache.curator.framework.recipes.locks.Reaper; -import org.apache.curator.test.TestingServer; -import org.apache.curator.utils.CloseableUtils; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.retry.RetryOneTime; -import org.apache.curator.test.Timing; -import org.apache.zookeeper.data.Stat; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.net.BindException; -import java.util.Random; - -import static org.assertj.core.api.Assertions.assertThat; - -/** - * This is a copy of Curator 2.7.1's TestChildReaper class, with minor - * modifications to make it work with JUnit (some setup code taken from - * Curator's BaseClassForTests). This is to ensure that the ChildReaper - * class we modified is still correct. 
- */ -public class TestChildReaper -{ - protected TestingServer server; - - @Before - public void setup() throws Exception { - while(this.server == null) { - try { - this.server = new TestingServer(); - } catch (BindException var2) { - System.err.println("Getting bind exception - retrying to allocate server"); - this.server = null; - } - } - } - - @After - public void teardown() throws Exception { - this.server.close(); - this.server = null; - } - - @Test - public void testSomeNodes() throws Exception - { - - Timing timing = new Timing(); - ChildReaper reaper = null; - CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); - try - { - client.start(); - - Random r = new Random(); - int nonEmptyNodes = 0; - for ( int i = 0; i < 10; ++i ) - { - client.create().creatingParentsIfNeeded().forPath("/test/" + Integer.toString(i)); - if ( r.nextBoolean() ) - { - client.create().forPath("/test/" + Integer.toString(i) + "/foo"); - ++nonEmptyNodes; - } - } - - reaper = new ChildReaper(client, "/test", Reaper.Mode.REAP_UNTIL_DELETE, 1); - reaper.start(); - - timing.forWaiting().sleepABit(); - - Stat stat = client.checkExists().forPath("/test"); - assertThat(stat.getNumChildren()).isEqualTo(nonEmptyNodes); - } - finally - { - CloseableUtils.closeQuietly(reaper); - CloseableUtils.closeQuietly(client); - } - } - - @Test - public void testSimple() throws Exception - { - Timing timing = new Timing(); - ChildReaper reaper = null; - CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); - try - { - client.start(); - - for ( int i = 0; i < 10; ++i ) - { - client.create().creatingParentsIfNeeded().forPath("/test/" + Integer.toString(i)); - } - - reaper = new ChildReaper(client, "/test", Reaper.Mode.REAP_UNTIL_DELETE, 1); - reaper.start(); - - timing.forWaiting().sleepABit(); - - Stat stat = client.checkExists().forPath("/test"); - assertThat(stat.getNumChildren()).isZero(); - } - finally - { - CloseableUtils.closeQuietly(reaper); - CloseableUtils.closeQuietly(client); - } - } - - @Test - public void testMultiPath() throws Exception - { - Timing timing = new Timing(); - ChildReaper reaper = null; - CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); - try - { - client.start(); - - for ( int i = 0; i < 10; ++i ) - { - client.create().creatingParentsIfNeeded().forPath("/test1/" + Integer.toString(i)); - client.create().creatingParentsIfNeeded().forPath("/test2/" + Integer.toString(i)); - client.create().creatingParentsIfNeeded().forPath("/test3/" + Integer.toString(i)); - } - - reaper = new ChildReaper(client, "/test2", Reaper.Mode.REAP_UNTIL_DELETE, 1); - reaper.start(); - reaper.addPath("/test1"); - - timing.forWaiting().sleepABit(); - - Stat stat = client.checkExists().forPath("/test1"); - assertThat(stat.getNumChildren()).isZero(); - stat = client.checkExists().forPath("/test2"); - assertThat(stat.getNumChildren()).isZero(); - stat = client.checkExists().forPath("/test3"); - assertThat(stat.getNumChildren()).isEqualTo(10); - } - finally - { - CloseableUtils.closeQuietly(reaper); - CloseableUtils.closeQuietly(client); - } - } - - @Test - public void testNamespace() throws Exception - { - Timing timing = new Timing(); - ChildReaper reaper = null; - CuratorFramework client = CuratorFrameworkFactory.builder() - 
.connectString(server.getConnectString()) - .sessionTimeoutMs(timing.session()) - .connectionTimeoutMs(timing.connection()) - .retryPolicy(new RetryOneTime(1)) - .namespace("foo") - .build(); - try - { - client.start(); - - for ( int i = 0; i < 10; ++i ) - { - client.create().creatingParentsIfNeeded().forPath("/test/" + Integer.toString(i)); - } - - reaper = new ChildReaper(client, "/test", Reaper.Mode.REAP_UNTIL_DELETE, 1); - reaper.start(); - - timing.forWaiting().sleepABit(); - - Stat stat = client.checkExists().forPath("/test"); - assertThat(stat.getNumChildren()).isZero(); - - stat = client.usingNamespace(null).checkExists().forPath("/foo/test"); - assertThat(stat).isNotNull(); - assertThat(stat.getNumChildren()).isZero(); - } - finally - { - CloseableUtils.closeQuietly(reaper); - CloseableUtils.closeQuietly(client); - } - } -} From 1890c8b3636b5d31c6c22406f197ad2feeafd68d Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 6 May 2021 23:37:45 +0900 Subject: [PATCH 297/666] YARN-10756. Remove additional junit 4.11 dependency from javadoc. (#2960) Reviewed-by: Viraj Jasani Reviewed-by: Bilwa S T Signed-off-by: Takanobu Asanuma --- .../pom.xml | 13 ------------- .../pom.xml | 13 ------------- .../pom.xml | 13 ------------- .../hadoop-yarn-server-timelineservice/pom.xml | 13 ------------- 4 files changed, 52 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml index c51b5a20baecb..ccd1b13d1572a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml @@ -142,19 +142,6 @@ - - org.apache.maven.plugins - maven-javadoc-plugin - - - - junit - junit - 4.11 - - - - org.apache.maven.plugins maven-dependency-plugin diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index 12b0112680320..d8cf6ded0001e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -406,19 +406,6 @@ - - org.apache.maven.plugins - maven-javadoc-plugin - - - - junit - junit - 4.11 - - - - org.apache.maven.plugins diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml index 355a2697496dc..6207a2d024eb2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml @@ -204,19 +204,6 @@ - - org.apache.maven.plugins - maven-javadoc-plugin - - - - junit - junit - 4.11 - - - - org.apache.maven.plugins maven-dependency-plugin diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml index cb4f09766b881..65af3afadda62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml @@ -157,19 +157,6 @@ - - org.apache.maven.plugins - maven-javadoc-plugin - - - - junit - junit - 4.11 - - - - From 28c1f4bf728861f980ba8719a2b3e391437c0408 Mon Sep 17 00:00:00 2001 From: hchaverr Date: Thu, 6 May 2021 16:40:45 -0700 Subject: [PATCH 298/666] HADOOP-17680. Allow ProtobufRpcEngine to be extensible (#2905) Contributed by Hector Chaverri. --- .../apache/hadoop/ipc/ProtobufRpcEngine.java | 30 +++++++++++++++---- .../apache/hadoop/ipc/ProtobufRpcEngine2.java | 30 +++++++++++++++---- 2 files changed, 48 insertions(+), 12 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index a1500d52a7444..882cc141d8913 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -122,7 +122,7 @@ public ProtocolProxy getProtocolMetaInfoProxy( factory)), false); } - private static class Invoker implements RpcInvocationHandler { + protected static class Invoker implements RpcInvocationHandler { private final Map returnTypes = new ConcurrentHashMap(); private boolean isClosed = false; @@ -133,7 +133,7 @@ private static class Invoker implements RpcInvocationHandler { private AtomicBoolean fallbackToSimpleAuth; private AlignmentContext alignmentContext; - private Invoker(Class protocol, InetSocketAddress addr, + protected Invoker(Class protocol, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) @@ -148,7 +148,7 @@ private Invoker(Class protocol, InetSocketAddress addr, /** * This constructor takes a connectionId, instead of creating a new one. 
*/ - private Invoker(Class protocol, Client.ConnectionId connId, + protected Invoker(Class protocol, Client.ConnectionId connId, Configuration conf, SocketFactory factory) { this.remoteId = connId; this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class); @@ -225,8 +225,6 @@ public Message invoke(Object proxy, final Method method, Object[] args) traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method)); } - RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); - if (LOG.isTraceEnabled()) { LOG.trace(Thread.currentThread().getId() + ": Call -> " + remoteId + ": " + method.getName() + @@ -238,7 +236,7 @@ public Message invoke(Object proxy, final Method method, Object[] args) final RpcWritable.Buffer val; try { val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, - new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId, + constructRpcRequest(method, theRequest), remoteId, fallbackToSimpleAuth, alignmentContext); } catch (Throwable e) { @@ -283,6 +281,11 @@ public boolean isDone() { } } + protected Writable constructRpcRequest(Method method, Message theRequest) { + RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); + return new RpcProtobufRequest(rpcRequestHeader, theRequest); + } + private Message getReturnMessage(final Method method, final RpcWritable.Buffer buf) throws ServiceException { Message prototype = null; @@ -332,6 +335,14 @@ private Message getReturnProtoType(Method method) throws Exception { public ConnectionId getConnectionId() { return remoteId; } + + protected long getClientProtocolVersion() { + return clientProtocolVersion; + } + + protected String getProtocolName() { + return protocolName; + } } @VisibleForTesting @@ -518,6 +529,13 @@ public Writable call(RPC.Server server, String connectionProtocolName, String declaringClassProtoName = rpcRequest.getDeclaringClassProtocolName(); long clientVersion = rpcRequest.getClientProtocolVersion(); + return call(server, connectionProtocolName, request, receiveTime, + methodName, declaringClassProtoName, clientVersion); + } + + protected Writable call(RPC.Server server, String connectionProtocolName, + RpcWritable.Buffer request, long receiveTime, String methodName, + String declaringClassProtoName, long clientVersion) throws Exception { if (server.verbose) LOG.info("Call: connectionProtocolName=" + connectionProtocolName + ", method=" + methodName); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java index 310f44eebe213..2f5d56437d034 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java @@ -116,7 +116,7 @@ public ProtocolProxy getProtocolMetaInfoProxy( factory)), false); } - private static final class Invoker implements RpcInvocationHandler { + protected static class Invoker implements RpcInvocationHandler { private final Map returnTypes = new ConcurrentHashMap(); private boolean isClosed = false; @@ -127,7 +127,7 @@ private static final class Invoker implements RpcInvocationHandler { private AtomicBoolean fallbackToSimpleAuth; private AlignmentContext alignmentContext; - private Invoker(Class protocol, InetSocketAddress addr, + protected Invoker(Class protocol, InetSocketAddress addr, UserGroupInformation ticket, Configuration 
conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) @@ -142,7 +142,7 @@ private Invoker(Class protocol, InetSocketAddress addr, /** * This constructor takes a connectionId, instead of creating a new one. */ - private Invoker(Class protocol, Client.ConnectionId connId, + protected Invoker(Class protocol, Client.ConnectionId connId, Configuration conf, SocketFactory factory) { this.remoteId = connId; this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class); @@ -219,8 +219,6 @@ public Message invoke(Object proxy, final Method method, Object[] args) traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method)); } - RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); - if (LOG.isTraceEnabled()) { LOG.trace(Thread.currentThread().getId() + ": Call -> " + remoteId + ": " + method.getName() + @@ -232,7 +230,7 @@ public Message invoke(Object proxy, final Method method, Object[] args) final RpcWritable.Buffer val; try { val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, - new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId, + constructRpcRequest(method, theRequest), remoteId, fallbackToSimpleAuth, alignmentContext); } catch (Throwable e) { @@ -279,6 +277,11 @@ public boolean isDone() { } } + protected Writable constructRpcRequest(Method method, Message theRequest) { + RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); + return new RpcProtobufRequest(rpcRequestHeader, theRequest); + } + private Message getReturnMessage(final Method method, final RpcWritable.Buffer buf) throws ServiceException { Message prototype = null; @@ -328,6 +331,14 @@ private Message getReturnProtoType(Method method) throws Exception { public ConnectionId getConnectionId() { return remoteId; } + + protected long getClientProtocolVersion() { + return clientProtocolVersion; + } + + protected String getProtocolName() { + return protocolName; + } } @VisibleForTesting @@ -509,6 +520,13 @@ public Writable call(RPC.Server server, String connectionProtocolName, String declaringClassProtoName = rpcRequest.getDeclaringClassProtocolName(); long clientVersion = rpcRequest.getClientProtocolVersion(); + return call(server, connectionProtocolName, request, receiveTime, + methodName, declaringClassProtoName, clientVersion); + } + + protected Writable call(RPC.Server server, String connectionProtocolName, + RpcWritable.Buffer request, long receiveTime, String methodName, + String declaringClassProtoName, long clientVersion) throws Exception { if (server.verbose) { LOG.info("Call: connectionProtocolName=" + connectionProtocolName + ", method=" + methodName); From f5918191533678e3ec22147740285fe121f9f8a7 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Fri, 7 May 2021 13:52:17 +0900 Subject: [PATCH 299/666] HADOOP-17375. Fix the error of TestDynamometerInfra. 
(#2471) Signed-off-by: Akira Ajisaka --- .../tools/dynamometer/AllowAllImpersonationProvider.java | 5 +++++ .../hadoop/tools/dynamometer/TestDynamometerInfra.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java index 6800bbe0ef814..14f52ac2b28a2 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java @@ -38,4 +38,9 @@ public void authorize(UserGroupInformation user, InetAddress remoteAddress) { // Do nothing } + // Although this API was removed from the interface by HADOOP-17367, we need + // to keep it here because TestDynamometerInfra uses an old hadoop binary. + public void authorize(UserGroupInformation user, String remoteAddress) { + // Do nothing + } } diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index 174d9cc039e03..092a54b78aaad 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -122,7 +122,7 @@ public class TestDynamometerInfra { private static final String HADOOP_BIN_PATH_KEY = "dyno.hadoop.bin.path"; private static final String HADOOP_BIN_VERSION_KEY = "dyno.hadoop.bin.version"; - private static final String HADOOP_BIN_VERSION_DEFAULT = "3.1.3"; + private static final String HADOOP_BIN_VERSION_DEFAULT = "3.1.4"; private static final String FSIMAGE_FILENAME = "fsimage_0000000000000061740"; private static final String VERSION_FILENAME = "VERSION"; From 06112b219b6322936eeed48c7d2b2324ee129af5 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Sat, 8 May 2021 00:24:06 +0900 Subject: [PATCH 300/666] HDFS-16001. TestOfflineEditsViewer.testStored() fails reading negative value of FSEditLogOpCodes. 
(#2980) Reviewed-by: Xiaoqiao He --- .../src/test/resources/editsStored | Bin 7933 -> 7933 bytes .../src/test/resources/editsStored.xml | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 63c3a8f5c61bea4bfa9d3091c1659ac75e25f9b2..6a0891d2dc7fed0e41e576ef3b67c483fe8d5d68 100644 GIT binary patch delta 13 Vcmexs``4D`|NsAcH?sVc0{}Xs2mb&7 delta 13 Vcmexs``4D`|Ns97HnRMb0{}X*2m$~A diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index 4e91a59e39ac8..afadc5d884565 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -1,6 +1,6 @@ - -64 + -67 OP_START_LOG_SEGMENT From f59864d80c4653d978823254a723bf8926de828b Mon Sep 17 00:00:00 2001 From: Jinglun Date: Sat, 8 May 2021 19:50:17 +0800 Subject: [PATCH 301/666] HDFS-15923. RBF: Authentication failed when rename accross sub clusters (#2819). Contributed by zhuobin zheng. Reviewed-by: Jinglun Reviewed-by: Inigo Goiri --- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 6 + .../router/RouterFederationRename.java | 47 ++-- .../router/TestRouterFederationRename.java | 2 +- ...stRouterFederationRenameInKerberosEnv.java | 260 ++++++++++++++++++ 4 files changed, 298 insertions(+), 17 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 8ba016a3f39ee..e17602d1f6466 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -101,6 +101,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-distcp test + + org.apache.zookeeper + zookeeper + test + test-jar + com.fasterxml.jackson.core jackson-annotations diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java index 8074fdd2f64a7..d2bf989f1f46e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs; import org.apache.hadoop.tools.fedbalance.FedBalanceContext; @@ -31,6 +32,8 @@ import org.apache.hadoop.tools.fedbalance.procedure.BalanceProcedureScheduler; import java.io.IOException; +import java.io.InterruptedIOException; +import java.security.PrivilegedExceptionAction; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; @@ -96,23 +99,35 @@ boolean routerFedRename(final String src, final String dst, } RemoteLocation srcLoc = srcLocations.get(0); RemoteLocation dstLoc = dstLocations.get(0); - // Build and submit 
router federation rename job. - BalanceJob job = buildRouterRenameJob(srcLoc.getNameserviceId(), - dstLoc.getNameserviceId(), srcLoc.getDest(), dstLoc.getDest()); - BalanceProcedureScheduler scheduler = rpcServer.getFedRenameScheduler(); - countIncrement(); + + UserGroupInformation routerUser = UserGroupInformation.getLoginUser(); + try { - scheduler.submit(job); - LOG.info("Rename {} to {} from namespace {} to {}. JobId={}.", src, dst, - srcLoc.getNameserviceId(), dstLoc.getNameserviceId(), job.getId()); - scheduler.waitUntilDone(job); - if (job.getError() != null) { - throw new IOException("Rename of " + src + " to " + dst + " failed.", - job.getError()); - } - return true; - } finally { - countDecrement(); + // as router user with saveJournal and task submission privileges + return routerUser.doAs((PrivilegedExceptionAction) () -> { + // Build and submit router federation rename job. + BalanceJob job = buildRouterRenameJob(srcLoc.getNameserviceId(), + dstLoc.getNameserviceId(), srcLoc.getDest(), dstLoc.getDest()); + BalanceProcedureScheduler scheduler = rpcServer.getFedRenameScheduler(); + countIncrement(); + try { + scheduler.submit(job); + LOG.info("Rename {} to {} from namespace {} to {}. JobId={}.", src, + dst, srcLoc.getNameserviceId(), dstLoc.getNameserviceId(), + job.getId()); + scheduler.waitUntilDone(job); + if (job.getError() != null) { + throw new IOException("Rename of " + src + " to " + dst + + " failed.", job.getError()); + } + return true; + } finally { + countDecrement(); + } + }); + } catch (InterruptedException e) { + LOG.warn("Fed balance job is interrupted.", e); + throw new InterruptedIOException(e.getMessage()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index c47098f3372cd..a9a17b3ef5025 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -409,7 +409,7 @@ public void testRbfRenameWithMultiDestination() throws Exception { getRouterFileSystem().delete(new Path(renamedDir), true); } - @Test(timeout = 10000) + @Test(timeout = 20000) public void testCounter() throws Exception { final RouterRpcServer rpcServer = router.getRouter().getRpcServer(); List nss = cluster.getNameservices(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java new file mode 100644 index 0000000000000..369508f3b22f7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java @@ -0,0 +1,260 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ha.ClientBaseWithFixes; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.tools.fedbalance.DistCpProcedure; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_DEFAULT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.ZK_DTSM_ZK_AUTH_TYPE; +import static org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.ZK_DTSM_ZK_CONNECTION_STRING; +import static org.apache.hadoop.test.GenericTestUtils.getMethodName; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_PRINCIPAL; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Basic tests of router federation rename. Rename across namespaces. 
+ */ +public class TestRouterFederationRenameInKerberosEnv + extends ClientBaseWithFixes { + + private static final int NUM_SUBCLUSTERS = 2; + private static final int NUM_DNS = 6; + + private static String clientPrincipal = "client@EXAMPLE.COM"; + private static String serverPrincipal = System.getenv().get("USERNAME") + + "/localhost@EXAMPLE.COM"; + private static String keytab = new File( + System.getProperty("test.dir", "target"), + UUID.randomUUID().toString()) + .getAbsolutePath(); + + private static Configuration baseConf = new Configuration(false); + + private static MiniKdc kdc; + + /** Federated HDFS cluster. */ + private MiniRouterDFSCluster cluster; + + /** Random Router for this federated cluster. */ + private RouterContext router; + + @BeforeClass + public static void globalSetUp() throws Exception { + // init KDC + File workDir = new File(System.getProperty("test.dir", "target")); + kdc = new MiniKdc(MiniKdc.createConf(), workDir); + kdc.start(); + kdc.createPrincipal(new File(keytab), clientPrincipal, serverPrincipal); + + + baseConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, + true); + + SecurityUtil.setAuthenticationMethod(KERBEROS, baseConf); + baseConf.set(RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY, + serverPrincipal); + baseConf.set(RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY, keytab); + baseConf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, + serverPrincipal); + baseConf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytab); + baseConf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, + serverPrincipal); + baseConf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytab); + baseConf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, + true); + + baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, + DFS_DATA_TRANSFER_PROTECTION_DEFAULT); + baseConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true); + + DistCpProcedure.enableForTest(); + } + + @AfterClass + public static void globalTearDown() { + kdc.stop(); + DistCpProcedure.disableForTest(); + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + cluster.shutdown(); + } + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + + cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); + cluster.setNumDatanodesPerNameservice(NUM_DNS); + cluster.addNamenodeOverrides(baseConf); + cluster.setIndependentDNs(); + // Start NNs and DNs and wait until ready. + cluster.startCluster(); + + // Start routers, enable router federation rename. + String journal = "hdfs://" + cluster.getCluster().getNameNode(1) + .getClientNamenodeAddress() + "/journal"; + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .routerRenameOption() + .set(SCHEDULER_JOURNAL_URI, journal) + .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") + .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") + .set(ZK_DTSM_ZK_CONNECTION_STRING, hostPort) + .set(ZK_DTSM_ZK_AUTH_TYPE, "none") + .set(RM_PRINCIPAL, serverPrincipal) + .build(); + // We decrease the DN cache times to make the test faster. 
+ routerConf.setTimeDuration( + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + cluster.addRouterOverrides(baseConf); + cluster.addRouterOverrides(routerConf); + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + + // We decrease the DN heartbeat expire interval to make them dead faster + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + + // Create mock locations + cluster.installMockLocations(); + + // Create test fixtures on NN + cluster.createTestDirectoriesNamenode(); + + // Random router for this test + RouterContext rndRouter = cluster.getRandomRouter(); + setRouter(rndRouter); + } + + protected void createDir(FileSystem fs, String dir) throws IOException { + fs.mkdirs(new Path(dir)); + String file = dir + "/file"; + createFile(fs, file, 32); + verifyFileExists(fs, dir); + verifyFileExists(fs, file); + } + + protected void testRenameDir(RouterContext testRouter, String path, + String renamedPath, boolean exceptionExpected, Callable call) + throws IOException { + createDir(testRouter.getFileSystem(), path); + // rename + boolean exceptionThrown = false; + try { + call.call(); + assertFalse(verifyFileExists(testRouter.getFileSystem(), path)); + assertTrue(verifyFileExists(testRouter.getFileSystem(), + renamedPath + "/file")); + } catch (Exception ex) { + exceptionThrown = true; + assertTrue(verifyFileExists(testRouter.getFileSystem(), + path + "/file")); + assertFalse(verifyFileExists(testRouter.getFileSystem(), renamedPath)); + } finally { + FileContext fileContext = testRouter.getFileContext(); + fileContext.delete(new Path(path), true); + fileContext.delete(new Path(renamedPath), true); + } + if (exceptionExpected) { + // Error was expected. + assertTrue(exceptionThrown); + } else { + // No error was expected. + assertFalse(exceptionThrown); + } + } + + protected void setRouter(RouterContext r) throws IOException { + this.router = r; + } + + @Test + public void testClientRename() throws IOException { + String ns0 = cluster.getNameservices().get(0); + String ns1 = cluster.getNameservices().get(1); + // Test successfully rename a dir to a destination that is in a different + // namespace. + String dir = + cluster.getFederatedTestDirectoryForNS(ns0) + "/" + getMethodName(); + String renamedDir = + cluster.getFederatedTestDirectoryForNS(ns1) + "/" + getMethodName(); + testRenameDir(router, dir, renamedDir, false, () -> { + UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(clientPrincipal, keytab); + ugi.doAs((PrivilegedExceptionAction) () -> { + DFSClient client = router.getClient(); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(dir, renamedDir); + return null; + }); + return null; + }); + + } + +} \ No newline at end of file From 27277dbce0121f0e6da19eb8238e0b72bf0482b6 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sun, 9 May 2021 03:40:08 +0530 Subject: [PATCH 302/666] HADOOP-17644. Add back the exceptions removed by HADOOP-17432 for compatibility. Contributed by Quan Li. 
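The RBF rename change above submits the balance job inside a doAs block so it runs with the router's own login credentials, and the new Kerberos test logs a client in from a keytab and drives the rename the same way. Below is a minimal, self-contained sketch of that UserGroupInformation pattern; the class name, principal, and keytab path are placeholders for illustration, not values taken from the patch.

```java
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class UgiDoAsSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder principal and keytab; a real deployment supplies its own.
    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI("client@EXAMPLE.COM",
            "/etc/security/keytabs/client.keytab");

    // Everything inside the action runs with the credentials of this UGI,
    // mirroring routerUser.doAs(...) in RouterFederationRename.
    String user = ugi.doAs((PrivilegedExceptionAction<String>) () ->
        UserGroupInformation.getCurrentUser().getUserName());
    System.out.println("Privileged action ran as " + user);
  }
}
```

The point of the doAs wrapper in the patch is that journal writes and job submission happen as the router's login user rather than the remote caller, which is what lets the cross-subcluster rename succeed in a Kerberized cluster.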
--- .../hadoop/security/authentication/util/KerberosUtil.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java index 95f0106a24424..fc6f957b9622e 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java @@ -79,11 +79,16 @@ private static Oid getNumericOidInstance(String oidName) { * * @return Oid instance * @param oidName The oid Name + * @throws ClassNotFoundException for backward compatibility. + * @throws GSSException for backward compatibility. * @throws NoSuchFieldException if the input is not supported. + * @throws IllegalAccessException for backward compatibility. + * */ @Deprecated public static Oid getOidInstance(String oidName) - throws NoSuchFieldException { + throws ClassNotFoundException, GSSException, NoSuchFieldException, + IllegalAccessException { switch (oidName) { case "GSS_SPNEGO_MECH_OID": return GSS_SPNEGO_MECH_OID; From af0b689cfb65341d9e97e2735e34a6a7c97447b6 Mon Sep 17 00:00:00 2001 From: hexiaoqiao Date: Mon, 10 May 2021 14:11:36 +0800 Subject: [PATCH 303/666] HADOOP-17690. Improve the log for The DecayRpcScheduler. Contributed by Bhavik Patel. --- .../apache/hadoop/ipc/DecayRpcScheduler.java | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 5477c971c3aa1..e69064d8fb882 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -628,7 +628,8 @@ private int cachedOrComputedPriorityLevel(Object identity) { List costList = callCosts.get(identity); long currentCost = costList == null ? 
0 : costList.get(0).get(); int priority = computePriorityLevel(currentCost, identity); - LOG.debug("compute priority for {} priority {}", identity, priority); + LOG.debug("compute priority for identity: {}={}", identity, + priority); return priority; } @@ -666,7 +667,7 @@ int getPriorityLevel(UserGroupInformation ugi) { void setPriorityLevel(UserGroupInformation ugi, int priority) { String identity = getIdentity(newSchedulable(ugi)); priority = Math.min(numLevels - 1, priority); - LOG.info("Setting priority for user:" + identity + "=" + priority); + LOG.info("Setting priority for user: {}={}", identity, priority); staticPriorities.put(identity, priority); } @@ -735,11 +736,9 @@ public void addResponseTime(String callName, Schedulable schedulable, responseTimeCountInCurrWindow.getAndIncrement(priorityLevel); responseTimeTotalInCurrWindow.getAndAdd(priorityLevel, queueTime+processingTime); - if (LOG.isDebugEnabled()) { - LOG.debug("addResponseTime for call: {} priority: {} queueTime: {} " + - "processingTime: {} ", callName, priorityLevel, queueTime, - processingTime); - } + LOG.debug("addResponseTime for call: {} priority: {} queueTime: {} " + + "processingTime: {} ", callName, priorityLevel, queueTime, + processingTime); } // Update the cached average response time at the end of the decay window @@ -763,10 +762,8 @@ void updateAverageResponseTime(boolean enableDecay) { responseTimeAvgInLastWindow.set(i, 0); } responseTimeCountInLastWindow.set(i, responseTimeCount); - if (LOG.isDebugEnabled()) { - LOG.debug("updateAverageResponseTime queue: {} Average: {} Count: {}", - i, averageResponseTime, responseTimeCount); - } + LOG.debug("updateAverageResponseTime queue: {} Average: {} Count: {}", + i, averageResponseTime, responseTimeCount); // Reset for next decay window responseTimeTotalInCurrWindow.set(i, 0); responseTimeCountInCurrWindow.set(i, 0); From 5e060c4e71c05676df3f37031d0e8c24faca3cc5 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Mon, 10 May 2021 17:09:51 +0530 Subject: [PATCH 304/666] HDFS-16003. ProcessReport print invalidatedBlocks should judge debug level at first. Contributed by lei w. --- .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index fdf300f913dd0..247de6b411885 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2831,9 +2831,12 @@ public boolean processReport(final DatanodeID nodeID, namesystem.writeUnlock(); } - for (Block b : invalidatedBlocks) { - blockLog.debug("BLOCK* processReport 0x{}: {} on node {} size {} does not" - + " belong to any file", strBlockReportId, b, node, b.getNumBytes()); + if(blockLog.isDebugEnabled()) { + for (Block b : invalidatedBlocks) { + blockLog.debug("BLOCK* processReport 0x{}: {} on node {} size {} " + + "does not belong to any file.", strBlockReportId, b, + node, b.getNumBytes()); + } } // Log the block report processing stats from Namenode perspective From 0d9bc4baf953856dcfa90ef6e140ee3eab08e20a Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Mon, 10 May 2021 22:35:14 +0530 Subject: [PATCH 305/666] HADOOP-17678. 
Add Dockerfile for Centos 7 (#2967) --- dev-support/docker/Dockerfile_centos_7 | 169 +++++++++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 dev-support/docker/Dockerfile_centos_7 diff --git a/dev-support/docker/Dockerfile_centos_7 b/dev-support/docker/Dockerfile_centos_7 new file mode 100644 index 0000000000000..bf95835e8194d --- /dev/null +++ b/dev-support/docker/Dockerfile_centos_7 @@ -0,0 +1,169 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for installing the necessary dependencies for building Hadoop. +# See BUILDING.txt. + +FROM centos:7 + +WORKDIR /root + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN yum update -y \ + && yum install -y centos-release-scl \ + && yum install -y devtoolset-9 \ + && yum install -y \ + ant \ + build-essential \ + bzip2 \ + bzip2-devel \ + clang \ + curl \ + cyrus-sasl-devel \ + doxygen \ + fuse \ + fuse-libs \ + fuse-devel \ + git \ + libcurl-devel \ + libtirpc-devel \ + libpmem-devel \ + libtool \ + lz4-devel \ + make \ + openssl-devel \ + pinentry-curses \ + python3 \ + python3-pip \ + python3-setuptools \ + python3-wheel \ + rsync \ + snappy-devel \ + sudo \ + valgrind \ + zlib-devel + +# Set GCC 9 as the default C/C++ compiler +RUN echo "source /opt/rh/devtoolset-9/enable" >> /etc/bashrc +SHELL ["/bin/bash", "--login", "-c"] + +#### +# Install Maven 3.6.3 +#### +RUN mkdir -p /opt/maven /tmp/maven \ + && curl -L -s -S https://mirrors.estointernet.in/apache/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \ + -o /tmp/maven/apache-maven-3.6.3-bin.tar.gz \ + && tar xzf /tmp/maven/apache-maven-3.6.3-bin.tar.gz --strip-components 1 -C /opt/maven + +#### +# Install CMake 3.19 +#### +# hadolint ignore=DL3003 +RUN mkdir -p /tmp/cmake /opt/cmake \ + && curl -L -s -S https://cmake.org/files/v3.19/cmake-3.19.0.tar.gz -o /tmp/cmake/cmake-3.19.0.tar.gz \ + && tar xzf /tmp/cmake/cmake-3.19.0.tar.gz --strip-components 1 -C /opt/cmake \ + && cd /opt/cmake || exit && ./bootstrap \ + && make "-j$(nproc)" \ + && make install \ + && cd /root || exit + +#### +# Install zstandard +#### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/zstd /tmp/zstd \ + && curl -L -s -S https://github.com/facebook/zstd/archive/refs/tags/v1.4.9.tar.gz -o /tmp/zstd/v1.4.9.tar.gz \ + && tar xzf /tmp/zstd/v1.4.9.tar.gz --strip-components 1 -C /opt/zstd \ + && cd /opt/zstd || exit \ + && make "-j$(nproc)" \ + && make install \ + && cd /root || exit + +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /opt/maven +ENV PATH "${PATH}:${MAVEN_HOME}/bin" +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-1.8.0 + +####### +# Install SpotBugs 4.2.2 +####### +RUN mkdir -p /opt/spotbugs \ + && curl -L -s -S 
https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz \ + && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ + && chmod +x /opt/spotbugs/bin/* +ENV SPOTBUGS_HOME /opt/spotbugs + +####### +# Install Boost 1.72 (1.71 ships with Focal) +####### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/boost-library \ + && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && mv boost_1_72_0.tar.bz2 /opt/boost-library \ + && cd /opt/boost-library \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd /opt/boost-library/boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install \ + && cd /root \ + && rm -rf /opt/boost-library + +###### +# Install Google Protobuf 3.7.1 (3.6.1 ships with Focal) +###### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/protobuf-src \ + && curl -L -s -S \ + https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ + -o /opt/protobuf.tar.gz \ + && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ + && cd /opt/protobuf-src \ + && ./configure --prefix=/opt/protobuf \ + && make "-j$(nproc)" \ + && make install \ + && cd /root \ + && rm -rf /opt/protobuf-src +ENV PROTOBUF_HOME /opt/protobuf +ENV PATH "${PATH}:/opt/protobuf/bin" + +#### +# Install Node.js +#### +# hadolint ignore=DL3003 +RUN mkdir -p /tmp/node \ + && curl -L -s -S https://nodejs.org/dist/v14.16.1/node-v14.16.1.tar.gz -o /tmp/node-v14.16.1.tar.gz \ + && tar xzf /tmp/node-v14.16.1.tar.gz --strip-components 1 -C /tmp/node \ + && cd /tmp/node || exit \ + && ./configure \ + && make "-j$(nproc)" \ + && make install \ + && cd /root || exit + +#### +# Install pylint and python-dateutil +#### +RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 + +#### +# Install bower +#### +# hadolint ignore=DL3008 +RUN npm install -g bower@1.8.8 From a08acf14a67acde3006c53cda7014a0fa1ad8f4b Mon Sep 17 00:00:00 2001 From: Siyao Meng <50227127+smengcl@users.noreply.github.com> Date: Mon, 10 May 2021 12:41:07 -0700 Subject: [PATCH 306/666] HDFS-15997. 
Implement dfsadmin -provisionSnapshotTrash -all (#2958) --- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 51 ++++++++++++++----- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index c874d9290fa8b..4f57040867588 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -38,6 +38,7 @@ import java.util.TreeSet; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.hdfs.client.HdfsAdmin; @@ -461,7 +462,7 @@ static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOExcep "\t[-fetchImage ]\n" + "\t[-allowSnapshot ]\n" + "\t[-disallowSnapshot ]\n" + - "\t[-provisionSnapshotTrash ]\n" + + "\t[-provisionSnapshotTrash [-all]]\n" + "\t[-shutdownDatanode [upgrade]]\n" + "\t[-evictWriters ]\n" + "\t[-getDatanodeInfo ]\n" + @@ -793,14 +794,7 @@ public void disallowSnapshot(String[] argv) throws IOException { System.out.println("Disallowing snapshot on " + argv[1] + " succeeded"); } - /** - * Provision trash root in a snapshottable directory. - * Usage: hdfs dfsadmin -provisionSnapshotTrash snapshotDir - * @param argv List of of command line parameters. - * @exception IOException - */ - public void provisionSnapshotTrash(String[] argv) throws IOException { - Path p = new Path(argv[1]); + private void provisionSnapshotTrashInternal(Path p) throws IOException { final HdfsAdmin admin = new HdfsAdmin(p.toUri(), getConf()); Path trashRoot; try { @@ -809,7 +803,35 @@ public void provisionSnapshotTrash(String[] argv) throws IOException { throw new RemoteException(e.getClass().getName(), e.getMessage()); } System.out.println("Successfully provisioned snapshot trash at " + - trashRoot); + trashRoot); + } + + private void provisionSnapshotTrashAll() throws IOException { + // Get all snapshottable directories + final DistributedFileSystem dfs = getDFS(); + SnapshottableDirectoryStatus[] lst = dfs.getSnapshottableDirListing(); + if (lst != null) { + for (SnapshottableDirectoryStatus dirStatus : lst) { + final Path p = dirStatus.getFullPath(); + provisionSnapshotTrashInternal(p); + } + } + } + + /** + * Provision trash root in a snapshottable directory. + * Usage: hdfs dfsadmin -provisionSnapshotTrash snapshotDir + * hdfs dfsadmin -provisionSnapshotTrash -all + * @param argv List of of command line parameters. + * @exception IOException + */ + public void provisionSnapshotTrash(String[] argv) throws IOException { + if (argv[1].equals("-all")) { + provisionSnapshotTrashAll(); + return; + } + Path p = new Path(argv[1]); + provisionSnapshotTrashInternal(p); } /** @@ -1266,9 +1288,10 @@ private void printHelp(String cmd) { String disallowSnapshot = "-disallowSnapshot :\n" + "\tDo not allow snapshots to be taken on a directory any more.\n"; - String provisionSnapshotTrash = "-provisionSnapshotTrash :\n" + - "\tProvision trash root in a snapshottable directory with permission" - + "\t" + HdfsAdmin.TRASH_PERMISSION + ".\n"; + String provisionSnapshotTrash = + "-provisionSnapshotTrash [-all]:\n" + + "\tProvision trash root in one or all snapshottable directories." 
+ + "\tTrash permission is " + HdfsAdmin.TRASH_PERMISSION + ".\n"; String shutdownDatanode = "-shutdownDatanode [upgrade]\n" + "\tSubmit a shutdown request for the given datanode. If an optional\n" @@ -2115,7 +2138,7 @@ private static void printUsage(String cmd) { + " [-disallowSnapshot ]"); } else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) { System.err.println("Usage: hdfs dfsadmin" - + " [-provisionSnapshotTrash ]"); + + " [-provisionSnapshotTrash [-all]]"); } else if ("-saveNamespace".equals(cmd)) { System.err.println("Usage: hdfs dfsadmin" + " [-saveNamespace [-beforeShutdown]]"); From 6c9584c74d56ccdb62cbaaab60493471c7eb06d5 Mon Sep 17 00:00:00 2001 From: Borislav Iordanov Date: Mon, 10 May 2021 16:31:48 -0400 Subject: [PATCH 307/666] HADOOP-17665 Ignore missing keystore configuration in reloading mechanism --- .../org/apache/hadoop/http/HttpServer2.java | 34 +++++++---- .../security/ssl/FileMonitoringTimerTask.java | 58 ++++++++++++++----- hadoop-yarn-project/hadoop-yarn/pom.xml | 1 + 3 files changed, 66 insertions(+), 27 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index dde27dbec497a..9f7562d35aa2c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -27,6 +27,7 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.List; import java.util.ArrayList; @@ -587,7 +588,8 @@ private ServerConnector createHttpsChannelConnector( conf.getLong(FileBasedKeyStoresFactory.SSL_STORES_RELOAD_INTERVAL_TPL_KEY, FileBasedKeyStoresFactory.DEFAULT_SSL_STORES_RELOAD_INTERVAL); - if (storesReloadInterval > 0) { + if (storesReloadInterval > 0 && + (keyStore != null || trustStore != null)) { this.configurationChangeMonitor = Optional.of( this.makeConfigurationChangeMonitor(storesReloadInterval, sslContextFactory)); } @@ -601,22 +603,30 @@ private ServerConnector createHttpsChannelConnector( private Timer makeConfigurationChangeMonitor(long reloadInterval, SslContextFactory.Server sslContextFactory) { java.util.Timer timer = new java.util.Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true); + ArrayList locations = new ArrayList(); + if (keyStore != null) { + locations.add(Paths.get(keyStore)); + } + if (trustStore != null) { + locations.add(Paths.get(trustStore)); + } // // The Jetty SSLContextFactory provides a 'reload' method which will reload both // truststore and keystore certificates. 
// timer.schedule(new FileMonitoringTimerTask( - Paths.get(keyStore), - path -> { - LOG.info("Reloading certificates from store keystore " + keyStore); - try { - sslContextFactory.reload(factory -> { }); - } catch (Exception ex) { - LOG.error("Failed to reload SSL keystore certificates", ex); - } - },null), - reloadInterval, - reloadInterval + locations, + path -> { + LOG.info("Reloading keystore and truststore certificates."); + try { + sslContextFactory.reload(factory -> { }); + } catch (Exception ex) { + LOG.error("Failed to reload SSL keystore " + + "and truststore certificates", ex); + } + },null), + reloadInterval, + reloadInterval ); return timer; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java index 40b61978ef1f4..d42d3173cb2b4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java @@ -24,6 +24,9 @@ import org.slf4j.LoggerFactory; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.TimerTask; import java.util.function.Consumer; @@ -42,34 +45,59 @@ public class FileMonitoringTimerTask extends TimerTask { static final String PROCESS_ERROR_MESSAGE = "Could not process file change : "; - final private Path filePath; + final private List filePaths; final private Consumer onFileChange; final Consumer onChangeFailure; - private long lastProcessed; + private List lastProcessed; /** - * Create file monitoring task to be scheduled using a standard Java {@link java.util.Timer} - * instance. + * See {@link #FileMonitoringTimerTask(List, Consumer, Consumer)}. * - * @param filePath The path to the file to monitor. - * @param onFileChange The function to call when the file has changed. - * @param onChangeFailure The function to call when an exception is thrown during the - * file change processing. + * @param filePath The file to monitor. + * @param onFileChange What to do when the file changes. + * @param onChangeFailure What to do when onFileChange + * throws an exception. */ public FileMonitoringTimerTask(Path filePath, Consumer onFileChange, - Consumer onChangeFailure) { - Preconditions.checkNotNull(filePath, "path to monitor disk file is not set"); - Preconditions.checkNotNull(onFileChange, "action to monitor disk file is not set"); + Consumer onChangeFailure) { + this(Collections.singletonList(filePath), onFileChange, onChangeFailure); + } - this.filePath = filePath; - this.lastProcessed = filePath.toFile().lastModified(); + /** + * Create file monitoring task to be scheduled using a standard + * Java {@link java.util.Timer} instance. + * + * @param filePaths The path to the file to monitor. + * @param onFileChange The function to call when the file has changed. + * @param onChangeFailure The function to call when an exception is + * thrown during the file change processing. 
+ */ + public FileMonitoringTimerTask(List filePaths, + Consumer onFileChange, + Consumer onChangeFailure) { + Preconditions.checkNotNull(filePaths, + "path to monitor disk file is not set"); + Preconditions.checkNotNull(onFileChange, + "action to monitor disk file is not set"); + + this.filePaths = new ArrayList(filePaths); + this.lastProcessed = new ArrayList(); + this.filePaths.forEach(path -> + this.lastProcessed.add(path.toFile().lastModified())); this.onFileChange = onFileChange; this.onChangeFailure = onChangeFailure; } @Override public void run() { - if (lastProcessed != filePath.toFile().lastModified()) { + int modified = -1; + for (int i = 0; i < filePaths.size() && modified < 0; i++) { + if (lastProcessed.get(i) != filePaths.get(i).toFile().lastModified()) { + modified = i; + } + } + if (modified > -1) { + Path filePath = filePaths.get(modified); try { onFileChange.accept(filePath); } catch (Throwable t) { @@ -79,7 +107,7 @@ public void run() { LOG.error(PROCESS_ERROR_MESSAGE + filePath.toString(), t); } } - lastProcessed = filePath.toFile().lastModified(); + lastProcessed.set(modified, filePath.toFile().lastModified()); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index caa9a1b6c0bc6..e97e35608553a 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -246,4 +246,5 @@ hadoop-yarn-ui hadoop-yarn-csi + From 7bed59fd4ce16ed2cd4f9054050d3cabd752be85 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 11 May 2021 09:08:15 +0530 Subject: [PATCH 308/666] HDFS-16007. Deserialization of ReplicaState should avoid throwing ArrayIndexOutOfBoundsException (#2982) Signed-off-by: Akira Ajisaka --- .../server/common/HdfsServerConstants.java | 28 +++++++++++++++++-- .../hdfs/server/common/TestJspHelper.java | 19 +++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 254e9790da002..58e9d5fdd9557 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -23,6 +23,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.commons.lang3.Validate; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; @@ -287,6 +288,10 @@ enum ReplicaState { /** Temporary replica: created for replication and relocation only. */ TEMPORARY(4); + // Since ReplicaState (de)serialization depends on ordinal, either adding + // new value should be avoided to this enum or newly appended value should + // be handled by NameNodeLayoutVersion#Feature. + private static final ReplicaState[] cachedValues = ReplicaState.values(); private final int value; @@ -299,13 +304,32 @@ public int getValue() { return value; } + /** + * Retrieve ReplicaState corresponding to given index. + * + * @param v Index to retrieve {@link ReplicaState}. + * @return {@link ReplicaState} object. + * @throws IndexOutOfBoundsException if the index is invalid. + */ public static ReplicaState getState(int v) { + Validate.validIndex(cachedValues, v, "Index Expected range: [0, " + + (cachedValues.length - 1) + "]. 
Actual value: " + v); return cachedValues[v]; } - /** Read from in */ + /** + * Retrieve ReplicaState corresponding to index provided in binary stream. + * + * @param in Index value provided as bytes in given binary stream. + * @return {@link ReplicaState} object. + * @throws IOException if an I/O error occurs while reading bytes. + * @throws IndexOutOfBoundsException if the index is invalid. + */ public static ReplicaState read(DataInput in) throws IOException { - return cachedValues[in.readByte()]; + byte idx = in.readByte(); + Validate.validIndex(cachedValues, idx, "Index Expected range: [0, " + + (cachedValues.length - 1) + "]. Actual value: " + idx); + return cachedValues[idx]; } /** Write to out */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java index 1aff7669e9823..e5746a09095f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java @@ -457,6 +457,25 @@ public void testReadWriteReplicaState() { out.reset(); in.reset(); } + out = new DataOutputBuffer(); + out.writeByte(100); + in.reset(out.getData(), out.getLength()); + try { + HdfsServerConstants.ReplicaState.read(in); + fail("Should not have reached here"); + } catch (IndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index Expected range: [0, 4]. Actual value: 100"); + } + out.reset(); + in.reset(); + try { + HdfsServerConstants.ReplicaState.getState(200); + fail("Should not have reached here"); + } catch (IndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index Expected range: [0, 4]. Actual value: 200"); + } } catch (Exception ex) { fail("testReadWrite ex error ReplicaState"); } From bc980ec72fe2e71a5b1a56dcd27c9a65c03a74b3 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 12 May 2021 07:04:01 +0530 Subject: [PATCH 309/666] HADOOP-17686. 
Avoid potential NPE by using Path#getParentPath API in hadoop-huaweicloud (#2990) Signed-off-by: Takanobu Asanuma --- .../hadoop/fs/obs/OBSObjectBucketUtils.java | 9 +++++-- .../main/java/org/apache/hadoop/fs/Path.java | 26 ++++++++++++++++--- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSObjectBucketUtils.java b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSObjectBucketUtils.java index e632f61ca2db7..ca29a965e9911 100644 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSObjectBucketUtils.java +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSObjectBucketUtils.java @@ -55,6 +55,7 @@ import java.util.Comparator; import java.util.LinkedList; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ExecutionException; @@ -872,8 +873,12 @@ private static void getDirectories(final String key, final String sourceKey, directories.add(p.toString()); } while (p.compareTo(sourcePath) > 0) { - p = p.getParent(); - if (p.isRoot() || p.compareTo(sourcePath) == 0) { + Optional parent = p.getOptionalParentPath(); + if (!parent.isPresent()) { + break; + } + p = parent.get(); + if (p.compareTo(sourcePath) == 0) { break; } directories.add(p.toString()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 2649d279aa15f..f70ff01e4a0da 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -24,6 +24,7 @@ import java.io.Serializable; import java.net.URI; import java.net.URISyntaxException; +import java.util.Optional; import java.util.regex.Pattern; import org.apache.avro.reflect.Stringable; @@ -420,22 +421,39 @@ public String getName() { } /** - * Returns the parent of a path or null if at root. + * Returns the parent of a path or null if at root. Better alternative is + * {@link #getOptionalParentPath()} to handle nullable value for root path. + * * @return the parent of a path or null if at root */ public Path getParent() { + return getParentUtil(); + } + + /** + * Returns the parent of a path as {@link Optional} or + * {@link Optional#empty()} i.e an empty Optional if at root. + * + * @return Parent of path wrappen in {@link Optional}. + * {@link Optional#empty()} i.e an empty Optional if at root. + */ + public Optional getOptionalParentPath() { + return Optional.ofNullable(getParentUtil()); + } + + private Path getParentUtil() { String path = uri.getPath(); int lastSlash = path.lastIndexOf('/'); int start = startPositionWithoutWindowsDrive(path); if ((path.length() == start) || // empty path - (lastSlash == start && path.length() == start+1)) { // at root + (lastSlash == start && path.length() == start + 1)) { // at root return null; } String parent; - if (lastSlash==-1) { + if (lastSlash == -1) { parent = CUR_DIR; } else { - parent = path.substring(0, lastSlash==start?start+1:lastSlash); + parent = path.substring(0, lastSlash == start ? 
start + 1 : lastSlash); } return new Path(uri.getScheme(), uri.getAuthority(), parent); } From e68138b806512f5abbe3a2daad88ad94537ccf13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 May 2021 10:57:45 +0900 Subject: [PATCH 310/666] HADOOP-17683. Update commons-io to 2.8.0 (#2974) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Akira Ajisaka Signed-off-by: Wei-Chiu Chuang --- LICENSE-binary | 2 +- .../java/org/apache/hadoop/fs/FileUtil.java | 18 ++++++------------ hadoop-project/pom.xml | 2 +- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/LICENSE-binary b/LICENSE-binary index 5af4b60f6cf01..be1b797452f7d 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -249,7 +249,7 @@ commons-cli:commons-cli:1.2 commons-codec:commons-codec:1.11 commons-collections:commons-collections:3.2.2 commons-daemon:commons-daemon:1.0.13 -commons-io:commons-io:2.5 +commons-io:commons-io:2.8.0 commons-lang:commons-lang:2.6 commons-logging:commons-logging:1.1.3 commons-net:commons-net:3.6 diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index e078a2c519621..63cbd6212b322 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -604,18 +604,12 @@ public static long getDU(File dir) { return dir.length(); } else { File[] allFiles = dir.listFiles(); - if(allFiles != null) { - for (int i = 0; i < allFiles.length; i++) { - boolean isSymLink; - try { - isSymLink = org.apache.commons.io.FileUtils.isSymlink(allFiles[i]); - } catch(IOException ioe) { - isSymLink = true; - } - if(!isSymLink) { - size += getDU(allFiles[i]); - } - } + if (allFiles != null) { + for (File f : allFiles) { + if (!org.apache.commons.io.FileUtils.isSymlink(f)) { + size += getDU(f); + } + } } return size; } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c93afaeff50db..1f0be3b2976a3 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -119,7 +119,7 @@ 3.2.2 1.19 1.0 - 2.5 + 2.8.0 3.7 1.1.3 1.1 From 5af1c1462c794f740d20ea781b06570c281b29f7 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Wed, 12 May 2021 14:54:47 +0200 Subject: [PATCH 311/666] YARN-10571. Refactor dynamic queue handling logic. Contributed by Andras Gyori. 
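The HADOOP-17686 change above adds Path#getOptionalParentPath so callers such as OBSObjectBucketUtils can walk up a path hierarchy without a null parent ever being dereferenced at the root. A short sketch of that loop follows, using an illustrative path and class name only.

```java
import java.util.Optional;

import org.apache.hadoop.fs.Path;

public class ParentWalkSketch {
  public static void main(String[] args) {
    Path p = new Path("/bucket/dir/subdir/file");  // illustrative path
    while (true) {
      System.out.println(p);
      // An empty Optional means we reached the root, so the loop ends
      // without any explicit null check on the parent.
      Optional<Path> parent = p.getOptionalParentPath();
      if (!parent.isPresent()) {
        break;
      }
      p = parent.get();
    }
  }
}
```

Compared with the older getParent(), the Optional return value makes the root case explicit at the call site, which is exactly how the hadoop-huaweicloud change avoids the potential NPE.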
--- .../scheduler/capacity/CapacityScheduler.java | 101 +------- .../CapacitySchedulerAutoQueueHandler.java | 140 ----------- .../CapacitySchedulerQueueManager.java | 227 ++++++++++++++++++ ...CapacitySchedulerNewQueueAutoCreation.java | 41 +++- ...WebServicesCapacitySchedDynamicConfig.java | 14 +- 5 files changed, 274 insertions(+), 249 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 37acb608c8a19..1e81f67df9a7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -233,8 +233,6 @@ public Configuration getConf() { private AppPriorityACLsManager appPriorityACLManager; private boolean multiNodePlacementEnabled; - private CapacitySchedulerAutoQueueHandler autoQueueHandler; - private boolean printedVerboseLoggingForAsyncScheduling; /** @@ -343,9 +341,6 @@ void initScheduler(Configuration configuration) throws this.labelManager, this.appPriorityACLManager); this.queueManager.setCapacitySchedulerContext(this); - this.autoQueueHandler = new CapacitySchedulerAutoQueueHandler( - this.queueManager); - this.workflowPriorityMappingsMgr = new WorkflowPriorityMappingsManager(); this.activitiesManager = new ActivitiesManager(rmContext); @@ -970,7 +965,8 @@ private CSQueue getOrCreateQueueFromPlacementContext(ApplicationId if (fallbackContext.hasParentQueue()) { try { - return autoCreateLeafQueue(fallbackContext); + writeLock.lock(); + return queueManager.createQueue(fallbackContext); } catch (YarnException | IOException e) { // A null queue is expected if the placementContext is null. 
In order // not to disrupt the control flow, if we fail to auto create a queue, @@ -1007,6 +1003,8 @@ private CSQueue getOrCreateQueueFromPlacementContext(ApplicationId new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, message)); } + } finally { + writeLock.unlock(); } } } @@ -2557,30 +2555,7 @@ public void removeQueue(String queueName) throws SchedulerDynamicEditException { writeLock.lock(); try { - LOG.info("Removing queue: " + queueName); - CSQueue q = this.getQueue(queueName); - if (!(AbstractAutoCreatedLeafQueue.class.isAssignableFrom( - q.getClass()))) { - throw new SchedulerDynamicEditException( - "The queue that we are asked " + "to remove (" + queueName - + ") is not a AutoCreatedLeafQueue or ReservationQueue"); - } - AbstractAutoCreatedLeafQueue disposableLeafQueue = - (AbstractAutoCreatedLeafQueue) q; - // at this point we should have no more apps - if (disposableLeafQueue.getNumApplications() > 0) { - throw new SchedulerDynamicEditException( - "The queue " + queueName + " is not empty " + disposableLeafQueue - .getApplications().size() + " active apps " - + disposableLeafQueue.getPendingApplications().size() - + " pending apps"); - } - - ((AbstractManagedParentQueue) disposableLeafQueue.getParent()) - .removeChildQueue(q); - this.queueManager.removeQueue(queueName); - LOG.info( - "Removal of AutoCreatedLeafQueue " + queueName + " has succeeded"); + queueManager.removeLegacyDynamicQueue(queueName); } finally { writeLock.unlock(); } @@ -2629,34 +2604,7 @@ public void addQueue(Queue queue) throws SchedulerDynamicEditException, IOException { writeLock.lock(); try { - if (queue == null) { - throw new SchedulerDynamicEditException( - "Queue specified is null. Should be an implementation of " - + "AbstractAutoCreatedLeafQueue"); - } else if (!(AbstractAutoCreatedLeafQueue.class - .isAssignableFrom(queue.getClass()))) { - throw new SchedulerDynamicEditException( - "Queue is not an implementation of " - + "AbstractAutoCreatedLeafQueue : " + queue.getClass()); - } - - AbstractAutoCreatedLeafQueue newQueue = - (AbstractAutoCreatedLeafQueue) queue; - - if (newQueue.getParent() == null || !(AbstractManagedParentQueue.class. 
- isAssignableFrom(newQueue.getParent().getClass()))) { - throw new SchedulerDynamicEditException( - "ParentQueue for " + newQueue + " is not properly set" - + " (should be set and be a PlanQueue or ManagedParentQueue)"); - } - - AbstractManagedParentQueue parent = - (AbstractManagedParentQueue) newQueue.getParent(); - String queuePath = newQueue.getQueuePath(); - parent.addChildQueue(newQueue); - this.queueManager.addQueue(queuePath, newQueue); - - LOG.info("Creation of AutoCreatedLeafQueue " + newQueue + " succeeded"); + queueManager.addLegacyDynamicQueue(queue); } finally { writeLock.unlock(); } @@ -3490,41 +3438,4 @@ public boolean placementConstraintEnabled() { public void setQueueManager(CapacitySchedulerQueueManager qm) { this.queueManager = qm; } - - private LeafQueue autoCreateLeafQueue( - ApplicationPlacementContext placementContext) - throws IOException, YarnException { - String leafQueueName = placementContext.getQueue(); - String parentQueueName = placementContext.getParentQueue(); - - if (!StringUtils.isEmpty(parentQueueName)) { - CSQueue parentQueue = getQueue(parentQueueName); - - if (parentQueue != null && - conf.isAutoCreateChildQueueEnabled(parentQueue.getQueuePath())) { - // Case 1: Handle ManagedParentQueue - ManagedParentQueue autoCreateEnabledParentQueue = - (ManagedParentQueue) parentQueue; - AutoCreatedLeafQueue autoCreatedLeafQueue = - new AutoCreatedLeafQueue( - this, leafQueueName, autoCreateEnabledParentQueue); - - addQueue(autoCreatedLeafQueue); - return autoCreatedLeafQueue; - - } else { - try { - writeLock.lock(); - return autoQueueHandler.autoCreateQueue(placementContext); - } finally { - writeLock.unlock(); - } - } - } - - throw new SchedulerDynamicEditException( - "Could not auto-create leaf queue for " + leafQueueName - + ". Queue mapping does not specify" - + " which parent queue it needs to be created under."); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java deleted file mode 100644 index 898b075d2e476..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerAutoQueueHandler.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; - -import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.server.resourcemanager.placement.ApplicationPlacementContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * Manages the validation and the creation of a Capacity Scheduler - * queue at runtime. - */ -public class CapacitySchedulerAutoQueueHandler { - private final CapacitySchedulerQueueManager queueManager; - private static final int MAXIMUM_DEPTH_ALLOWED = 2; - - public CapacitySchedulerAutoQueueHandler( - CapacitySchedulerQueueManager queueManager) { - this.queueManager = queueManager; - } - - /** - * Creates a LeafQueue and its upper hierarchy given a path. A parent is - * eligible for creation if either the placement context creation flags are - * set, or the auto queue creation is enabled for the first static parent in - * the hierarchy. - * - * @param queue the application placement information of the queue - * @return LeafQueue part of a given queue path - * @throws YarnException if the given path is not eligible to be auto created - */ - public LeafQueue autoCreateQueue(ApplicationPlacementContext queue) - throws YarnException { - ApplicationPlacementContext parentContext = - CSQueueUtils.extractQueuePath(queue.getParentQueue()); - List parentsToCreate = new ArrayList<>(); - - ApplicationPlacementContext queueCandidateContext = parentContext; - CSQueue firstExistingQueue = getQueue( - queueCandidateContext.getFullQueuePath()); - - while (firstExistingQueue == null) { - parentsToCreate.add(queueCandidateContext); - queueCandidateContext = CSQueueUtils.extractQueuePath( - queueCandidateContext.getParentQueue()); - firstExistingQueue = getQueue( - queueCandidateContext.getFullQueuePath()); - } - - CSQueue firstExistingStaticQueue = firstExistingQueue; - // Include the LeafQueue in the distance - int firstStaticParentDistance = parentsToCreate.size() + 1; - - while(isNonStaticParent(firstExistingStaticQueue)) { - queueCandidateContext = CSQueueUtils.extractQueuePath( - queueCandidateContext.getParentQueue()); - firstExistingStaticQueue = getQueue( - queueCandidateContext.getFullQueuePath()); - ++firstStaticParentDistance; - } - - // Reverse the collection to to represent the hierarchy to be created - // from highest to lowest level - Collections.reverse(parentsToCreate); - - if (!(firstExistingQueue instanceof ParentQueue)) { - throw new SchedulerDynamicEditException( - "Could not auto create hierarchy of " - + queue.getFullQueuePath() + ". Queue " - + firstExistingQueue.getQueuePath() + - " is not a ParentQueue." 
- ); - } - ParentQueue existingParentQueue = (ParentQueue) firstExistingQueue; - int depthLimit = extractDepthLimit(existingParentQueue); - - if (depthLimit == 0) { - throw new SchedulerDynamicEditException("Auto creation of queue " + - queue.getFullQueuePath() + " is not enabled under parent " - + existingParentQueue.getQueuePath()); - } - - if (firstStaticParentDistance > depthLimit) { - throw new SchedulerDynamicEditException( - "Could not auto create queue " + queue.getFullQueuePath() - + ". The distance of the LeafQueue from the first static " + - "ParentQueue is" + firstStaticParentDistance + ", which is " + - "above the limit."); - } - - for (ApplicationPlacementContext current : parentsToCreate) { - existingParentQueue = existingParentQueue - .addDynamicParentQueue(current.getFullQueuePath()); - queueManager.addQueue(existingParentQueue.getQueuePath(), - existingParentQueue); - } - - LeafQueue leafQueue = existingParentQueue.addDynamicLeafQueue( - queue.getFullQueuePath()); - queueManager.addQueue(leafQueue.getQueuePath(), leafQueue); - - return leafQueue; - } - - private int extractDepthLimit(ParentQueue parentQueue) { - if (parentQueue.isEligibleForAutoQueueCreation()) { - return MAXIMUM_DEPTH_ALLOWED; - } else { - return 0; - } - } - - private CSQueue getQueue(String queue) { - return queue != null ? queueManager.getQueue(queue) : null; - } - - private boolean isNonStaticParent(CSQueue queue) { - return (!(queue instanceof AbstractCSQueue) - || ((AbstractCSQueue) queue).isDynamicQueue()); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index 5cd14908e8914..a86a2536d0584 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -23,10 +23,13 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.yarn.server.resourcemanager.placement.ApplicationPlacementContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -70,6 +73,7 @@ public CSQueue hook(CSQueue queue) { } } + private static final int MAXIMUM_DYNAMIC_QUEUE_DEPTH = 2; private static final QueueHook NOOP = new QueueHook(); private CapacitySchedulerContext csContext; private final YarnAuthorizationProvider authorizer; @@ -437,6 +441,229 @@ private Map> getQueueToLabels() { return this.queueStateManager; } + /** + * Removes an {@code AutoCreatedLeafQueue} from the manager collection and + * from its parent children collection. 
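The handler removed above and the determineMissingParents method added further down in this hunk both walk a queue path upwards until an existing static parent is found. A standalone sketch of that walk (class and method names here are illustrative only, not part of the patch):

import java.util.LinkedList;
import java.util.List;

public final class QueuePathWalkSketch {

  // Strip the last ".segment" repeatedly to enumerate the parent candidates of a
  // queue path, nearest parent first, the way the StringBuilder-based loop in
  // determineMissingParents does.
  public static List<String> parentCandidates(String queuePath) {
    LinkedList<String> candidates = new LinkedList<>();
    StringBuilder candidate = new StringBuilder(queuePath);
    int lastDot = candidate.lastIndexOf(".");
    while (lastDot > 0) {
      candidate.setLength(lastDot);
      candidates.addLast(candidate.toString());
      lastDot = candidate.lastIndexOf(".");
    }
    return candidates;
  }

  public static void main(String[] args) {
    // Prints [root.a.b, root.a, root]; the scheduler may only auto-create one dynamic
    // parent between the leaf and its first static ancestor, since the depth limit of 2
    // counts the leaf itself.
    System.out.println(parentCandidates("root.a.b.c"));
  }
}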
+ * + * @param queueName queue to be removed + * @throws SchedulerDynamicEditException if queue is not eligible for deletion + */ + public void removeLegacyDynamicQueue(String queueName) + throws SchedulerDynamicEditException { + LOG.info("Removing queue: " + queueName); + CSQueue q = this.getQueue(queueName); + if (q == null || !(AbstractAutoCreatedLeafQueue.class.isAssignableFrom( + q.getClass()))) { + throw new SchedulerDynamicEditException( + "The queue that we are asked " + "to remove (" + queueName + + ") is not a AutoCreatedLeafQueue or ReservationQueue"); + } + AbstractAutoCreatedLeafQueue disposableLeafQueue = + (AbstractAutoCreatedLeafQueue) q; + // at this point we should have no more apps + if (disposableLeafQueue.getNumApplications() > 0) { + throw new SchedulerDynamicEditException( + "The queue " + queueName + " is not empty " + disposableLeafQueue + .getApplications().size() + " active apps " + + disposableLeafQueue.getPendingApplications().size() + + " pending apps"); + } + + ((AbstractManagedParentQueue) disposableLeafQueue.getParent()) + .removeChildQueue(q); + removeQueue(queueName); + LOG.info( + "Removal of AutoCreatedLeafQueue " + queueName + " has succeeded"); + } + + /** + * Adds an {@code AutoCreatedLeafQueue} to the manager collection and extends + * the children collection of its parent. + * + * @param queue to be added + * @throws SchedulerDynamicEditException if queue is not eligible to be added + * @throws IOException if parent can not accept the queue + */ + public void addLegacyDynamicQueue(Queue queue) + throws SchedulerDynamicEditException, IOException { + if (queue == null) { + throw new SchedulerDynamicEditException( + "Queue specified is null. Should be an implementation of " + + "AbstractAutoCreatedLeafQueue"); + } else if (!(AbstractAutoCreatedLeafQueue.class + .isAssignableFrom(queue.getClass()))) { + throw new SchedulerDynamicEditException( + "Queue is not an implementation of " + + "AbstractAutoCreatedLeafQueue : " + queue.getClass()); + } + + AbstractAutoCreatedLeafQueue newQueue = + (AbstractAutoCreatedLeafQueue) queue; + + if (newQueue.getParent() == null || !(AbstractManagedParentQueue.class. + isAssignableFrom(newQueue.getParent().getClass()))) { + throw new SchedulerDynamicEditException( + "ParentQueue for " + newQueue + " is not properly set" + + " (should be set and be a PlanQueue or ManagedParentQueue)"); + } + + AbstractManagedParentQueue parent = + (AbstractManagedParentQueue) newQueue.getParent(); + String queuePath = newQueue.getQueuePath(); + parent.addChildQueue(newQueue); + addQueue(queuePath, newQueue); + + LOG.info("Creation of AutoCreatedLeafQueue " + newQueue + " succeeded"); + } + + /** + * Auto creates a LeafQueue and its upper hierarchy given a path at runtime. 
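A hedged, caller-side sketch of the consolidated entry point being added here, mirroring how the updated tests further down drive it; the wrapper class is hypothetical, while the individual calls all appear in this patch:

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.placement.ApplicationPlacementContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueueUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;

final class AutoQueueCreationSketch {

  // Resolves a full queue path into a placement context and asks the queue manager
  // to auto-create the leaf (and any missing dynamic parents) for it.
  static LeafQueue createLeaf(CapacityScheduler cs, String queuePath)
      throws YarnException, IOException {
    ApplicationPlacementContext ctx = CSQueueUtils.extractQueuePath(queuePath);
    return cs.getCapacitySchedulerQueueManager().createQueue(ctx);
  }
}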
+ * + * @param queue the application placement information of the queue + * @return the auto created LeafQueue + * @throws YarnException if the given path is not eligible to be auto created + * @throws IOException if the given path can not be added to the parent + */ + public LeafQueue createQueue(ApplicationPlacementContext queue) + throws YarnException, IOException { + String leafQueueName = queue.getQueue(); + String parentQueueName = queue.getParentQueue(); + + if (!StringUtils.isEmpty(parentQueueName)) { + CSQueue parentQueue = getQueue(parentQueueName); + + if (parentQueue != null && csContext.getConfiguration() + .isAutoCreateChildQueueEnabled(parentQueue.getQueuePath())) { + return createLegacyAutoQueue(queue); + } else { + return createAutoQueue(queue); + } + } + + throw new SchedulerDynamicEditException( + "Could not auto-create leaf queue for " + leafQueueName + + ". Queue mapping does not specify" + + " which parent queue it needs to be created under."); + } + + /** + * Determines the missing parent paths of a potentially auto creatable queue. + * The missing parents are sorted in a way that the first item is the highest + * in the hierarchy. + * Example: + * root.a, root.a.b, root.a.b.c + * + * @param queue to be auto created + * @return missing parent paths + * @throws SchedulerDynamicEditException if the given queue is not eligible + * to be auto created + */ + public List determineMissingParents( + ApplicationPlacementContext queue) throws SchedulerDynamicEditException { + if (!queue.hasParentQueue()) { + throw new SchedulerDynamicEditException("Can not auto create queue " + + queue.getFullQueuePath() + " due to missing ParentQueue path."); + } + + // Start from the first parent + int firstStaticParentDistance = 1; + + StringBuilder parentCandidate = new StringBuilder(queue.getParentQueue()); + LinkedList parentsToCreate = new LinkedList<>(); + + CSQueue firstExistingParent = getQueue(parentCandidate.toString()); + CSQueue firstExistingStaticParent = firstExistingParent; + + while (isNonStaticParent(firstExistingStaticParent) + && parentCandidate.length() != 0) { + ++firstStaticParentDistance; + + if (firstStaticParentDistance > MAXIMUM_DYNAMIC_QUEUE_DEPTH) { + throw new SchedulerDynamicEditException( + "Could not auto create queue " + queue.getFullQueuePath() + + ". The distance of the LeafQueue from the first static " + + "ParentQueue is " + firstStaticParentDistance + ", which is " + + "above the limit."); + } + + if (firstExistingParent == null) { + parentsToCreate.addFirst(parentCandidate.toString()); + } + + int lastIndex = parentCandidate.lastIndexOf("."); + parentCandidate.setLength(Math.max(lastIndex, 0)); + + if (firstExistingParent == null) { + firstExistingParent = getQueue(parentCandidate.toString()); + } + + firstExistingStaticParent = getQueue(parentCandidate.toString()); + } + + if (!(firstExistingParent instanceof ParentQueue)) { + throw new SchedulerDynamicEditException( + "Could not auto create hierarchy of " + + queue.getFullQueuePath() + ". Queue " + queue.getParentQueue() + + " is not a ParentQueue." 
+ ); + } + + ParentQueue existingParentQueue = (ParentQueue) firstExistingParent; + + if (!existingParentQueue.isEligibleForAutoQueueCreation()) { + throw new SchedulerDynamicEditException("Auto creation of queue " + + queue.getFullQueuePath() + " is not enabled under parent " + + existingParentQueue.getQueuePath()); + } + + return parentsToCreate; + } + + private LeafQueue createAutoQueue(ApplicationPlacementContext queue) + throws SchedulerDynamicEditException { + List parentsToCreate = determineMissingParents(queue); + // First existing parent is either the parent of the last missing parent + // or the parent of the given path + String existingParentName = queue.getParentQueue(); + if (!parentsToCreate.isEmpty()) { + existingParentName = parentsToCreate.get(0).substring( + 0, parentsToCreate.get(0).lastIndexOf(".")); + } + + ParentQueue existingParentQueue = (ParentQueue) getQueue( + existingParentName); + + for (String current : parentsToCreate) { + existingParentQueue = existingParentQueue.addDynamicParentQueue(current); + addQueue(existingParentQueue.getQueuePath(), existingParentQueue); + } + + LeafQueue leafQueue = existingParentQueue.addDynamicLeafQueue( + queue.getFullQueuePath()); + addQueue(leafQueue.getQueuePath(), leafQueue); + + return leafQueue; + } + + private LeafQueue createLegacyAutoQueue(ApplicationPlacementContext queue) + throws IOException, SchedulerDynamicEditException { + CSQueue parentQueue = getQueue(queue.getParentQueue()); + // Case 1: Handle ManagedParentQueue + ManagedParentQueue autoCreateEnabledParentQueue = + (ManagedParentQueue) parentQueue; + AutoCreatedLeafQueue autoCreatedLeafQueue = + new AutoCreatedLeafQueue( + csContext, queue.getQueue(), autoCreateEnabledParentQueue); + + addLegacyDynamicQueue(autoCreatedLeafQueue); + return autoCreatedLeafQueue; + } + + private boolean isNonStaticParent(CSQueue queue) { + return (!(queue instanceof AbstractCSQueue) + || ((AbstractCSQueue) queue).isDynamicQueue()); + } + private boolean isDynamicQueue(CSQueue queue) { return (queue instanceof AbstractCSQueue) && ((AbstractCSQueue) queue).isDynamicQueue(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index c514fc7af2a04..b96c1e4896892 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -23,7 +23,6 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -49,6 +48,8 @@ import java.util.Set; import java.util.HashSet; +import java.io.IOException; + public class 
TestCapacitySchedulerNewQueueAutoCreation extends TestCapacitySchedulerAutoCreatedQueueBase { private static final Logger LOG = LoggerFactory.getLogger( @@ -59,7 +60,7 @@ public class TestCapacitySchedulerNewQueueAutoCreation private MockRM mockRM = null; private CapacityScheduler cs; private CapacitySchedulerConfiguration csConf; - private CapacitySchedulerAutoQueueHandler autoQueueHandler; + private CapacitySchedulerQueueManager autoQueueHandler; private AutoCreatedQueueDeletionPolicy policy = new AutoCreatedQueueDeletionPolicy(); @@ -114,8 +115,7 @@ protected RMNodeLabelsManager createNodeLabelManager() { policy.init(cs.getConfiguration(), cs.getRMContext(), cs); mockRM.start(); cs.start(); - autoQueueHandler = new CapacitySchedulerAutoQueueHandler( - cs.getCapacitySchedulerQueueManager()); + autoQueueHandler = cs.getCapacitySchedulerQueueManager(); mockRM.registerNode("h1:1234", MAX_MEMORY * GB); // label = x } @@ -608,6 +608,34 @@ public void testAutoQueueCreationMaxAppUpdate() throws Exception { Assert.assertEquals(50, e1.getMaxApplications()); } + @Test(expected = SchedulerDynamicEditException.class) + public void testAutoCreateQueueWithAmbiguousNonFullPathParentName() + throws Exception { + startScheduler(); + + createQueue("root.a.a"); + createQueue("a.a"); + } + + @Test + public void testAutoCreateQueueIfFirstExistingParentQueueIsNotStatic() + throws Exception { + startScheduler(); + + // create a dynamic ParentQueue + createQueue("root.a.a-parent-auto.a1-leaf-auto"); + Assert.assertNotNull(cs.getQueue("root.a.a-parent-auto")); + + // create a new dynamic LeafQueue under the existing ParentQueue + createQueue("root.a.a-parent-auto.a2-leaf-auto"); + + CSQueue a2Leaf = cs.getQueue("a2-leaf-auto"); + + // Make sure a2-leaf-auto is under a-parent-auto + Assert.assertEquals("root.a.a-parent-auto", + a2Leaf.getParent().getQueuePath()); + } + @Test public void testAutoCreateQueueIfAmbiguousQueueNames() throws Exception { startScheduler(); @@ -1109,8 +1137,9 @@ public void testRemoveDanglingAutoCreatedQueuesOnReinit() throws Exception { "when its dynamic parent is removed", bAutoLeaf); } - protected LeafQueue createQueue(String queuePath) throws YarnException { - return autoQueueHandler.autoCreateQueue( + protected LeafQueue createQueue(String queuePath) throws YarnException, + IOException { + return autoQueueHandler.createQueue( CSQueueUtils.extractQueuePath(queuePath)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java index 3ccb967a6bb8d..663bb6763d98a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java @@ -38,14 +38,12 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; -import 
org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; -import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueueUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerAutoQueueHandler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.GuiceServletConfig; @@ -85,7 +83,7 @@ public class TestRMWebServicesCapacitySchedDynamicConfig extends private static final int GB = 1024; protected static MockRM RM; - private CapacitySchedulerAutoQueueHandler autoQueueHandler; + private CapacitySchedulerQueueManager autoQueueHandler; private CapacitySchedulerConfiguration csConf; private static class ExpectedQueueWithProperties { @@ -330,13 +328,13 @@ public void testSchedulerResponseWeightModeWithAutoCreatedQueues() private void initAutoQueueHandler() throws Exception { CapacityScheduler cs = (CapacityScheduler) RM.getResourceScheduler(); - autoQueueHandler = new CapacitySchedulerAutoQueueHandler( - cs.getCapacitySchedulerQueueManager()); + autoQueueHandler = cs.getCapacitySchedulerQueueManager(); MockNM nm1 = RM.registerNode("h1:1234", 1200 * GB); // label = x } - private LeafQueue createQueue(String queuePath) throws YarnException { - return autoQueueHandler.autoCreateQueue( + private LeafQueue createQueue(String queuePath) throws YarnException, + IOException { + return autoQueueHandler.createQueue( CSQueueUtils.extractQueuePath(queuePath)); } From 2a79528ecc404a8bc772c88d1690fb7fdaff7f73 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 12 May 2021 20:35:58 +0530 Subject: [PATCH 312/666] HADOOP-17689. Avoid Potential NPE in org.apache.hadoop.fs (#3008) Signed-off-by: Takanobu Asanuma --- .../java/org/apache/hadoop/fs/DelegateToFileSystem.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java index 3a139781e0372..cdb4f3e02cc6f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java @@ -24,6 +24,7 @@ import java.util.Arrays; import java.util.EnumSet; import java.util.List; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; @@ -90,7 +91,11 @@ public FSDataOutputStream createInternal (Path f, if (!createParent) { // parent must exist. // since this.create makes parent dirs automatically // we must throw exception if parent does not exist. 
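For context on why the guard introduced just below is needed, a standalone sketch (not part of the patch): a root path has no parent, so passing the result of getParent() straight to getFileStatus(...) can fail with an NPE instead of the intended FileNotFoundException.

import org.apache.hadoop.fs.Path;

public class RootParentSketch {
  public static void main(String[] args) {
    Path root = new Path("/");
    // Prints "null": a root path has no parent, which is the case the
    // Optional-based check below handles explicitly.
    System.out.println(root.getParent());
  }
}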
- final FileStatus stat = getFileStatus(f.getParent()); + Optional parentPath = f.getOptionalParentPath(); + if (!parentPath.isPresent()) { + throw new FileNotFoundException("Missing parent:" + f); + } + final FileStatus stat = getFileStatus(parentPath.get()); if (stat == null) { throw new FileNotFoundException("Missing parent:" + f); } From 93a1537e84a5249b6e4eef6c276a21e4f7f2b74d Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 13 May 2021 14:22:25 +0900 Subject: [PATCH 313/666] HADOOP-17685. Fix junit deprecation warnings in hadoop-common module. (#2983) Signed-off-by: Takanobu Asanuma --- .../apache/hadoop/conf/TestConfiguration.java | 68 ++++++------ .../key/TestKeyProviderCryptoExtension.java | 3 +- .../crypto/key/kms/TestKMSClientProvider.java | 3 +- .../TestLoadBalancingKMSClientProvider.java | 3 +- .../hadoop/fs/FileSystemContractBaseTest.java | 8 +- .../org/apache/hadoop/fs/TestDefaultUri.java | 22 ++-- .../fs/TestFileSystemStorageStatistics.java | 6 +- .../org/apache/hadoop/fs/TestFsShell.java | 24 ++-- .../org/apache/hadoop/fs/TestFsShellCopy.java | 17 ++- .../org/apache/hadoop/fs/TestFsShellList.java | 15 ++- .../apache/hadoop/fs/TestFsShellTouch.java | 105 ++++++++++-------- .../apache/hadoop/fs/TestLocalFileSystem.java | 8 +- .../fs/TestLocalFileSystemPermission.java | 9 +- .../contract/AbstractFSContractTestBase.java | 6 +- .../hadoop/fs/ftp/TestFTPFileSystem.java | 3 +- .../hadoop/fs/sftp/TestSFTPFileSystem.java | 59 ++++++---- .../hadoop/fs/shell/TestPrintableString.java | 8 +- .../apache/hadoop/fs/shell/find/TestAnd.java | 3 +- .../fs/shell/find/TestFilterExpression.java | 3 +- .../apache/hadoop/fs/shell/find/TestFind.java | 3 +- .../hadoop/fs/shell/find/TestIname.java | 3 +- .../apache/hadoop/fs/shell/find/TestName.java | 3 +- .../hadoop/fs/shell/find/TestPrint.java | 3 +- .../hadoop/fs/shell/find/TestPrint0.java | 3 +- .../hadoop/fs/shell/find/TestResult.java | 4 +- .../fs/viewfs/ViewFileSystemBaseTest.java | 24 ++-- .../hadoop/ha/TestZKFailoverController.java | 3 +- .../apache/hadoop/http/TestHttpServer.java | 11 +- .../io/compress/TestDecompressorStream.java | 21 ++-- .../hadoop/io/erasurecode/TestECSchema.java | 3 +- .../erasurecode/coder/TestRSErasureCoder.java | 4 +- .../io/erasurecode/coder/TestXORCoder.java | 4 +- .../io/retry/TestDefaultRetryPolicy.java | 18 +-- .../metrics2/source/TestJvmMetrics.java | 3 +- .../java/org/apache/hadoop/net/TestDNS.java | 15 +-- .../net/TestMockDomainNameResolver.java | 14 +-- .../org/apache/hadoop/security/TestKDiag.java | 3 +- .../hadoop/security/TestKDiagNoKDC.java | 3 +- .../alias/TestCredentialProviderFactory.java | 17 +-- .../TestDefaultImpersonationProvider.java | 4 +- .../TestZKDelegationTokenSecretManager.java | 3 +- ...onTokenAuthenticationHandlerWithMocks.java | 3 +- .../hadoop/service/TestServiceOperations.java | 7 +- .../AbstractServiceLauncherTestBase.java | 3 +- .../hadoop/test/PlatformAssumptions.java | 2 +- .../hadoop/test/UnitTestcaseTimeLimit.java | 5 +- .../apache/hadoop/util/TestCrcComposer.java | 3 +- .../org/apache/hadoop/util/TestCrcUtil.java | 3 +- .../util/TestDiskCheckerWithDiskIo.java | 5 +- .../apache/hadoop/util/TestNativeCrc32.java | 34 +++--- .../hadoop/util/TestReflectionUtils.java | 6 +- .../org/apache/hadoop/util/TestShell.java | 3 +- .../org/apache/hadoop/util/TestWinUtils.java | 31 +++--- 53 files changed, 330 insertions(+), 317 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index a2273ef34faba..085b20442546a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -50,9 +50,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.assertj.core.api.Assertions; import org.junit.After; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; import static org.apache.hadoop.conf.StorageUnit.BYTES; @@ -60,7 +60,6 @@ import static org.apache.hadoop.conf.StorageUnit.KB; import static org.apache.hadoop.conf.StorageUnit.MB; import static org.apache.hadoop.conf.StorageUnit.TB; -import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; import org.apache.commons.lang3.StringUtils; @@ -78,14 +77,10 @@ import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; -import org.hamcrest.CoreMatchers; -import org.junit.rules.ExpectedException; import org.mockito.Mockito; public class TestConfiguration { - @Rule - public ExpectedException thrown= ExpectedException.none(); private static final double DOUBLE_DELTA = 0.000000001f; private Configuration conf; final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath(); @@ -1488,61 +1483,64 @@ public void testStorageUnit() { conf.setStorageSize(key, 10, MB); // This call returns the value specified in the Key as a double in MBs. - assertThat(conf.getStorageSize(key, "1GB", MB), - is(10.0)); + Assertions.assertThat(conf.getStorageSize(key, "1GB", MB)) + .isEqualTo(10.0); // Since this key is missing, This call converts the default value of 1GB // to MBs are returns that value. - assertThat(conf.getStorageSize(nonKey, "1GB", MB), - is(1024.0)); + Assertions.assertThat(conf.getStorageSize(nonKey, "1GB", MB)) + .isEqualTo(1024.0); conf.setStorageSize(key, 1024, BYTES); - assertThat(conf.getStorageSize(key, 100, KB), is(1.0)); + Assertions.assertThat(conf.getStorageSize(key, 100, KB)).isEqualTo(1.0); - assertThat(conf.getStorageSize(nonKey, 100.0, KB), is(100.0)); + Assertions.assertThat(conf.getStorageSize(nonKey, 100.0, KB)) + .isEqualTo(100.0); // We try out different kind of String formats to see if they work and // during read, we also try to read using a different Storage Units. conf.setStrings(key, "1TB"); - assertThat(conf.getStorageSize(key, "1PB", GB), is(1024.0)); + Assertions.assertThat(conf.getStorageSize(key, "1PB", GB)) + .isEqualTo(1024.0); conf.setStrings(key, "1bytes"); - assertThat(conf.getStorageSize(key, "1PB", KB), is(0.001)); + Assertions.assertThat(conf.getStorageSize(key, "1PB", KB)) + .isEqualTo(0.001); conf.setStrings(key, "2048b"); - assertThat(conf.getStorageSize(key, "1PB", KB), is(2.0)); + Assertions.assertThat(conf.getStorageSize(key, "1PB", KB)).isEqualTo(2.0); conf.setStrings(key, "64 GB"); - assertThat(conf.getStorageSize(key, "1PB", GB), is(64.0)); + Assertions.assertThat(conf.getStorageSize(key, "1PB", GB)).isEqualTo(64.0); // Match the parsing patterns of getLongBytes, which takes single char // suffix. 
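The same three mechanical replacements recur throughout this patch. A condensed sketch of the before/after patterns; the parseMegabytes helper is a made-up stand-in for the Configuration.getStorageSize calls in the real tests:

import static org.junit.Assert.assertThrows;

import java.util.concurrent.TimeUnit;

import org.assertj.core.api.Assertions;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class JUnitMigrationSketch {

  // 1) Deprecated Timeout(millis) constructor -> explicit TimeUnit.
  @Rule
  public Timeout globalTimeout = new Timeout(30, TimeUnit.SECONDS);

  // 2) ExpectedException rule -> Assert.assertThrows from JUnit 4.13.
  @Test
  public void rejectsBadUnit() {
    assertThrows(IllegalArgumentException.class, () -> parseMegabytes("1HB"));
  }

  // 3) Hamcrest assertThat(x, is(y)) -> AssertJ fluent assertions.
  @Test
  public void comparesValues() {
    Assertions.assertThat(parseMegabytes("10m")).isEqualTo(10.0);
  }

  // Hypothetical stand-in for the storage-size parsing the real tests exercise.
  private static double parseMegabytes(String value) {
    if (!value.endsWith("m")) {
      throw new IllegalArgumentException("Unknown storage unit: " + value);
    }
    return Double.parseDouble(value.substring(0, value.length() - 1));
  }
}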
conf.setStrings(key, "1T"); - assertThat(conf.getStorageSize(key, "1GB", TB), is(1.0)); + Assertions.assertThat(conf.getStorageSize(key, "1GB", TB)).isEqualTo(1.0); conf.setStrings(key, "1k"); - assertThat(conf.getStorageSize(key, "1GB", KB), is(1.0)); + Assertions.assertThat(conf.getStorageSize(key, "1GB", KB)).isEqualTo(1.0); conf.setStrings(key, "10m"); - assertThat(conf.getStorageSize(key, "1GB", MB), is(10.0)); + Assertions.assertThat(conf.getStorageSize(key, "1GB", MB)).isEqualTo(10.0); // Missing format specification, this should throw. conf.setStrings(key, "100"); - thrown.expect(IllegalArgumentException.class); - conf.getStorageSize(key, "1PB", GB); + assertThrows(IllegalArgumentException.class, + () -> conf.getStorageSize(key, "1PB", GB)); // illegal format specification, this should throw. conf.setStrings(key, "1HB"); - thrown.expect(IllegalArgumentException.class); - conf.getStorageSize(key, "1PB", GB); + assertThrows(IllegalArgumentException.class, + () -> conf.getStorageSize(key, "1PB", GB)); // Illegal number specification, this should throw. conf.setStrings(key, "HadoopGB"); - thrown.expect(IllegalArgumentException.class); - conf.getStorageSize(key, "1PB", GB); + assertThrows(IllegalArgumentException.class, + () -> conf.getStorageSize(key, "1PB", GB)); } @Test @@ -2424,10 +2422,10 @@ public void testGetPasswordDeprecatedKeyStored() throws Exception { Configuration.addDeprecation(oldKey, newKey); - assertThat(conf.getPassword(newKey), - CoreMatchers.is(password.toCharArray())); - assertThat(conf.getPassword(oldKey), - CoreMatchers.is(password.toCharArray())); + Assertions.assertThat(conf.getPassword(newKey)) + .isEqualTo(password.toCharArray()); + Assertions.assertThat(conf.getPassword(oldKey)) + .isEqualTo(password.toCharArray()); FileUtil.fullyDelete(tmpDir); } @@ -2453,10 +2451,10 @@ public void testGetPasswordByDeprecatedKey() throws Exception { Configuration.addDeprecation(oldKey, newKey); - assertThat(conf.getPassword(newKey), - CoreMatchers.is(password.toCharArray())); - assertThat(conf.getPassword(oldKey), - CoreMatchers.is(password.toCharArray())); + Assertions.assertThat(conf.getPassword(newKey)) + .isEqualTo(password.toCharArray()); + Assertions.assertThat(conf.getPassword(oldKey)) + .isEqualTo(password.toCharArray()); FileUtil.fullyDelete(tmpDir); } @@ -2469,7 +2467,7 @@ public void testGettingPropertiesWithPrefix() throws Exception { } conf.set("different.prefix" + ".name", "value"); Map prefixedProps = conf.getPropsWithPrefix("prefix."); - assertThat(prefixedProps.size(), is(10)); + Assertions.assertThat(prefixedProps).hasSize(10); for (int i = 0; i < 10; i++) { assertEquals("value" + i, prefixedProps.get("name" + i)); } @@ -2480,7 +2478,7 @@ public void testGettingPropertiesWithPrefix() throws Exception { conf.set("subprefix." 
+ "subname" + i, "value_${foo}" + i); } prefixedProps = conf.getPropsWithPrefix("subprefix."); - assertThat(prefixedProps.size(), is(10)); + Assertions.assertThat(prefixedProps).hasSize(10); for (int i = 0; i < 10; i++) { assertEquals("value_bar" + i, prefixedProps.get("subname" + i)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java index e897423b39545..0f9d6dc95f428 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java @@ -25,6 +25,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import javax.crypto.Cipher; import javax.crypto.spec.IvParameterSpec; @@ -58,7 +59,7 @@ public class TestKeyProviderCryptoExtension { private static KeyVersion encryptionKey; @Rule - public Timeout testTimeout = new Timeout(180000); + public Timeout testTimeout = new Timeout(180000, TimeUnit.MILLISECONDS); @BeforeClass public static void setup() throws Exception { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestKMSClientProvider.java index b87f45ac97a31..e437acc3e0584 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestKMSClientProvider.java @@ -36,6 +36,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND; @@ -57,7 +58,7 @@ public class TestKMSClientProvider { private final String oldTokenService = "host:16000"; @Rule - public Timeout globalTimeout = new Timeout(60000); + public Timeout globalTimeout = new Timeout(60000, TimeUnit.MILLISECONDS); { GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index 616c66b0748db..3684a1d0db68d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -39,6 +39,7 @@ import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import javax.net.ssl.SSLHandshakeException; @@ -67,7 +68,7 @@ public class TestLoadBalancingKMSClientProvider { @Rule - public Timeout testTimeout = new Timeout(30 * 1000); + public Timeout testTimeout = new Timeout(30, TimeUnit.SECONDS); @BeforeClass public static void setup() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java index 8065b3f61f52c..3a8b1e6ed085d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java @@ -21,6 +21,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; +import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,7 +37,6 @@ import org.junit.After; import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; /** @@ -61,7 +61,8 @@ public abstract class FileSystemContractBaseTest { protected byte[] data = dataset(getBlockSize() * 2, 0, 255); @Rule - public Timeout globalTimeout = new Timeout(getGlobalTimeout()); + public Timeout globalTimeout = + new Timeout(getGlobalTimeout(), TimeUnit.MILLISECONDS); /** * Get the timeout in milliseconds for each test case. @@ -71,9 +72,6 @@ protected int getGlobalTimeout() { return 30 * 1000; } - @Rule - public ExpectedException thrown = ExpectedException.none(); - @After public void tearDown() throws Exception { if (fs != null) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java index b84d66aa4ce2e..9572bed4098f4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java @@ -18,9 +18,7 @@ package org.apache.hadoop.fs; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.net.URI; @@ -40,32 +38,32 @@ public class TestDefaultUri { public void tetGetDefaultUri() { conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn_host"); URI uri = FileSystem.getDefaultUri(conf); - assertThat(uri.getScheme(), is("hdfs")); - assertThat(uri.getAuthority(), is("nn_host")); + assertThat(uri.getScheme()).isEqualTo("hdfs"); + assertThat(uri.getAuthority()).isEqualTo("nn_host"); } @Test public void tetGetDefaultUriWithPort() { conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn_host:5432"); URI uri = FileSystem.getDefaultUri(conf); - assertThat(uri.getScheme(), is("hdfs")); - assertThat(uri.getAuthority(), is("nn_host:5432")); + assertThat(uri.getScheme()).isEqualTo("hdfs"); + assertThat(uri.getAuthority()).isEqualTo("nn_host:5432"); } @Test public void tetGetDefaultUriTrailingSlash() { conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn_host/"); URI uri = FileSystem.getDefaultUri(conf); - assertThat(uri.getScheme(), is("hdfs")); - assertThat(uri.getAuthority(), is("nn_host")); + assertThat(uri.getScheme()).isEqualTo("hdfs"); + assertThat(uri.getAuthority()).isEqualTo("nn_host"); } @Test public void tetGetDefaultUriNoScheme() { conf.set(FS_DEFAULT_NAME_KEY, "nn_host"); URI uri = FileSystem.getDefaultUri(conf); - assertThat(uri.getScheme(), is("hdfs")); - assertThat(uri.getAuthority(), is("nn_host")); + assertThat(uri.getScheme()).isEqualTo("hdfs"); + assertThat(uri.getAuthority()).isEqualTo("nn_host"); } @Test @@ -81,7 +79,7 @@ public void 
tetGetDefaultUriNoSchemeTrailingSlash() throws Exception { public void tetFsGet() throws IOException { conf.set(FS_DEFAULT_NAME_KEY, "file:///"); FileSystem fs = FileSystem.get(conf); - assertThat(fs, instanceOf(LocalFileSystem.class)); + assertThat(fs).isInstanceOf(LocalFileSystem.class); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java index fa682649a021a..2b4e686e5929d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java @@ -25,12 +25,12 @@ import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Iterator; +import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -61,9 +61,7 @@ public class TestFileSystemStorageStatistics { new FileSystemStorageStatistics(FS_STORAGE_STATISTICS_NAME, statistics); @Rule - public final Timeout globalTimeout = new Timeout(10 * 1000); - @Rule - public final ExpectedException exception = ExpectedException.none(); + public final Timeout globalTimeout = new Timeout(10, TimeUnit.SECONDS); @Before public void setup() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java index 1c9781881b4b7..67906d526bc8a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java @@ -22,8 +22,7 @@ import org.apache.hadoop.fs.shell.CommandFactory; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; -import org.hamcrest.core.StringContains; -import org.junit.Assert; +import org.assertj.core.api.Assertions; import org.junit.Test; import org.mockito.Mockito; @@ -66,14 +65,14 @@ public void testDFSWithInvalidCommmand() throws Throwable { try (GenericTestUtils.SystemErrCapturer capture = new GenericTestUtils.SystemErrCapturer()) { ToolRunner.run(shell, new String[]{"dfs -mkdirs"}); - Assert.assertThat("FSShell dfs command did not print the error " + - "message when invalid command is passed", - capture.getOutput(), StringContains.containsString( - "-mkdirs: Unknown command")); - Assert.assertThat("FSShell dfs command did not print help " + - "message when invalid command is passed", - capture.getOutput(), StringContains.containsString( - "Usage: hadoop fs [generic options]")); + Assertions.assertThat(capture.getOutput()) + .as("FSShell dfs command did not print the error " + + "message when invalid command is passed") + .contains("-mkdirs: Unknown command"); + Assertions.assertThat(capture.getOutput()) + .as("FSShell dfs command did not print help " + + "message when invalid command is passed") + .contains("Usage: hadoop fs [generic options]"); } } @@ -95,9 +94,8 @@ public void testExceptionNullMessage() throws Exception { try (GenericTestUtils.SystemErrCapturer capture = new GenericTestUtils.SystemErrCapturer()) { ToolRunner.run(shell, new String[]{cmdName}); - Assert.assertThat(capture.getOutput(), - 
StringContains.containsString(cmdName - + ": Null exception message")); + Assertions.assertThat(capture.getOutput()) + .contains(cmdName + ": Null exception message"); } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java index e3c4ee05c8c8f..62c1b73227a21 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java @@ -19,12 +19,10 @@ package org.apache.hadoop.fs; import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; @@ -640,15 +638,16 @@ public void testCopyNoParent() throws Exception { final String noDirName = "noDir"; final Path noDir = new Path(noDirName); lfs.delete(noDir, true); - assertThat(lfs.exists(noDir), is(false)); + assertThat(lfs.exists(noDir)).isFalse(); - assertThat("Expected failed put to a path without parent directory", - shellRun("-put", srcPath.toString(), noDirName + "/foo"), is(not(0))); + assertThat(shellRun("-put", srcPath.toString(), noDirName + "/foo")) + .as("Expected failed put to a path without parent directory") + .isNotEqualTo(0); // Note the trailing '/' in the target path. - assertThat("Expected failed copyFromLocal to a non-existent directory", - shellRun("-copyFromLocal", srcPath.toString(), noDirName + "/"), - is(not(0))); + assertThat(shellRun("-copyFromLocal", srcPath.toString(), noDirName + "/")) + .as("Expected failed copyFromLocal to a non-existent directory") + .isNotEqualTo(0); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java index c780f41053583..05ad5c23e6542 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java @@ -18,14 +18,13 @@ package org.apache.hadoop.fs; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; - import org.apache.hadoop.conf.Configuration; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * Test FsShell -ls command. 
*/ @@ -45,7 +44,7 @@ public static void setup() throws Exception { String root = System.getProperty("test.build.data", "test/build/data"); testRootDir = lfs.makeQualified(new Path(root, "testFsShellList")); - assertThat(lfs.mkdirs(testRootDir), is(true)); + assertThat(lfs.mkdirs(testRootDir)).isTrue(); } @AfterClass @@ -57,23 +56,23 @@ private void createFile(Path filePath) throws Exception { FSDataOutputStream out = lfs.create(filePath); out.writeChars("I am " + filePath); out.close(); - assertThat(lfs.exists(lfs.getChecksumFile(filePath)), is(true)); + assertThat(lfs.exists(lfs.getChecksumFile(filePath))).isTrue(); } @Test public void testList() throws Exception { createFile(new Path(testRootDir, "abc")); String[] lsArgv = new String[]{"-ls", testRootDir.toString()}; - assertThat(shell.run(lsArgv), is(0)); + assertThat(shell.run(lsArgv)).isEqualTo(0); createFile(new Path(testRootDir, "abc\bd\tef")); createFile(new Path(testRootDir, "ghi")); createFile(new Path(testRootDir, "qq\r123")); lsArgv = new String[]{"-ls", testRootDir.toString()}; - assertThat(shell.run(lsArgv), is(0)); + assertThat(shell.run(lsArgv)).isEqualTo(0); lsArgv = new String[]{"-ls", "-q", testRootDir.toString()}; - assertThat(shell.run(lsArgv), is(0)); + assertThat(shell.run(lsArgv)).isEqualTo(0); } /* diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java index 62e7990674d3b..49bbd5af04f25 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java @@ -17,10 +17,6 @@ */ package org.apache.hadoop.fs; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; - import java.text.ParseException; import java.util.Date; @@ -34,6 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.assertj.core.api.Assertions.assertThat; + public class TestFsShellTouch { static final Logger LOG = LoggerFactory.getLogger(TestFsShellTouch.class); @@ -71,23 +69,25 @@ public void testTouchz() throws Exception { final String newFileName = "newFile"; final Path newFile = new Path(newFileName); lfs.delete(newFile, true); - assertThat(lfs.exists(newFile), is(false)); + assertThat(lfs.exists(newFile)).isFalse(); - assertThat("Expected successful touchz on a new file", - shellRun("-touchz", newFileName), is(0)); + assertThat(shellRun("-touchz", newFileName)) + .as("Expected successful touchz on a new file").isEqualTo(0); shellRun("-ls", newFileName); - assertThat("Expected successful touchz on an existing zero-length file", - shellRun("-touchz", newFileName), is(0)); + assertThat(shellRun("-touchz", newFileName)) + .as("Expected successful touchz on an existing zero-length file") + .isEqualTo(0); // Ensure noDir does not exist final String noDirName = "noDir"; final Path noDir = new Path(noDirName); lfs.delete(noDir, true); - assertThat(lfs.exists(noDir), is(false)); + assertThat(lfs.exists(noDir)).isFalse(); - assertThat("Expected failed touchz in a non-existent directory", - shellRun("-touchz", noDirName + "/foo"), is(not(0))); + assertThat(shellRun("-touchz", noDirName + "/foo")) + .as("Expected failed touchz in a non-existent directory") + .isNotEqualTo(0); } @Test @@ -96,25 +96,28 @@ public void testTouch() throws Exception { final String newFileName = 
"newFile2"; final Path newFile = new Path(newFileName); lfs.delete(newFile, true); - assertThat(lfs.exists(newFile), is(false)); + assertThat(lfs.exists(newFile)).isFalse(); { - assertThat( - "Expected successful touch on a non-existent file with -c option", - shellRun("-touch", "-c", newFileName), is(0)); - assertThat(lfs.exists(newFile), is(false)); + assertThat(shellRun("-touch", "-c", newFileName)) + .as("Expected successful touch on a non-existent file" + + " with -c option") + .isEqualTo(0); + assertThat(lfs.exists(newFile)).isFalse(); } { String strTime = formatTimestamp(System.currentTimeMillis()); Date dateObj = parseTimestamp(strTime); - assertThat( - "Expected successful touch on a new file with a specified timestamp", - shellRun("-touch", "-t", strTime, newFileName), is(0)); + assertThat(shellRun("-touch", "-t", strTime, newFileName)) + .as("Expected successful touch on a new file" + + " with a specified timestamp") + .isEqualTo(0); FileStatus new_status = lfs.getFileStatus(newFile); - assertThat(new_status.getAccessTime(), is(dateObj.getTime())); - assertThat(new_status.getModificationTime(), is(dateObj.getTime())); + assertThat(new_status.getAccessTime()).isEqualTo(dateObj.getTime()); + assertThat(new_status.getModificationTime()) + .isEqualTo(dateObj.getTime()); } FileStatus fstatus = lfs.getFileStatus(newFile); @@ -123,14 +126,15 @@ public void testTouch() throws Exception { String strTime = formatTimestamp(System.currentTimeMillis()); Date dateObj = parseTimestamp(strTime); - assertThat("Expected successful touch with a specified access time", - shellRun("-touch", "-a", "-t", strTime, newFileName), is(0)); + assertThat(shellRun("-touch", "-a", "-t", strTime, newFileName)) + .as("Expected successful touch with a specified access time") + .isEqualTo(0); FileStatus new_status = lfs.getFileStatus(newFile); // Verify if access time is recorded correctly (and modification time // remains unchanged). - assertThat(new_status.getAccessTime(), is(dateObj.getTime())); - assertThat(new_status.getModificationTime(), - is(fstatus.getModificationTime())); + assertThat(new_status.getAccessTime()).isEqualTo(dateObj.getTime()); + assertThat(new_status.getModificationTime()) + .isEqualTo(fstatus.getModificationTime()); } fstatus = lfs.getFileStatus(newFile); @@ -139,56 +143,63 @@ public void testTouch() throws Exception { String strTime = formatTimestamp(System.currentTimeMillis()); Date dateObj = parseTimestamp(strTime); - assertThat( - "Expected successful touch with a specified modification time", - shellRun("-touch", "-m", "-t", strTime, newFileName), is(0)); + assertThat(shellRun("-touch", "-m", "-t", strTime, newFileName)) + .as("Expected successful touch with a specified modification time") + .isEqualTo(0); // Verify if modification time is recorded correctly (and access time // remains unchanged). 
FileStatus new_status = lfs.getFileStatus(newFile); - assertThat(new_status.getAccessTime(), is(fstatus.getAccessTime())); - assertThat(new_status.getModificationTime(), is(dateObj.getTime())); + assertThat(new_status.getAccessTime()) + .isEqualTo(fstatus.getAccessTime()); + assertThat(new_status.getModificationTime()) + .isEqualTo(dateObj.getTime()); } { String strTime = formatTimestamp(System.currentTimeMillis()); Date dateObj = parseTimestamp(strTime); - assertThat("Expected successful touch with a specified timestamp", - shellRun("-touch", "-t", strTime, newFileName), is(0)); + assertThat(shellRun("-touch", "-t", strTime, newFileName)) + .as("Expected successful touch with a specified timestamp") + .isEqualTo(0); // Verify if both modification and access times are recorded correctly FileStatus new_status = lfs.getFileStatus(newFile); - assertThat(new_status.getAccessTime(), is(dateObj.getTime())); - assertThat(new_status.getModificationTime(), is(dateObj.getTime())); + assertThat(new_status.getAccessTime()).isEqualTo(dateObj.getTime()); + assertThat(new_status.getModificationTime()) + .isEqualTo(dateObj.getTime()); } { String strTime = formatTimestamp(System.currentTimeMillis()); Date dateObj = parseTimestamp(strTime); - assertThat("Expected successful touch with a specified timestamp", - shellRun("-touch", "-a", "-m", "-t", strTime, newFileName), is(0)); + assertThat(shellRun("-touch", "-a", "-m", "-t", strTime, newFileName)) + .as("Expected successful touch with a specified timestamp") + .isEqualTo(0); // Verify if both modification and access times are recorded correctly FileStatus new_status = lfs.getFileStatus(newFile); - assertThat(new_status.getAccessTime(), is(dateObj.getTime())); - assertThat(new_status.getModificationTime(), is(dateObj.getTime())); + assertThat(new_status.getAccessTime()).isEqualTo(dateObj.getTime()); + assertThat(new_status.getModificationTime()) + .isEqualTo(dateObj.getTime()); } { - assertThat("Expected failed touch with a missing timestamp", - shellRun("-touch", "-t", newFileName), is(not(0))); + assertThat(shellRun("-touch", "-t", newFileName)) + .as("Expected failed touch with a missing timestamp") + .isNotEqualTo(0); } // Verify -c option when file exists. 
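A hedged, runnable illustration of the -c semantics these assertions cover, driving FsShell the same way the test's shellRun helper does; the path and class name are made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class TouchNoCreateSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    // With -c, touch succeeds (exit code 0) but does not create a missing file.
    int rc = shell.run(new String[] {"-touch", "-c", "file:///tmp/no-such-file"});
    System.out.println("exit code: " + rc);
  }
}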
String strTime = formatTimestamp(System.currentTimeMillis()); Date dateObj = parseTimestamp(strTime); - assertThat( - "Expected successful touch on a non-existent file with -c option", - shellRun("-touch", "-c", "-t", strTime, newFileName), is(0)); + assertThat(shellRun("-touch", "-c", "-t", strTime, newFileName)) + .as("Expected successful touch on a non-existent file with -c option") + .isEqualTo(0); FileStatus fileStatus = lfs.getFileStatus(newFile); - assertThat(fileStatus.getAccessTime(), is(dateObj.getTime())); - assertThat(fileStatus.getModificationTime(), is(dateObj.getTime())); + assertThat(fileStatus.getAccessTime()).isEqualTo(dateObj.getTime()); + assertThat(fileStatus.getModificationTime()).isEqualTo(dateObj.getTime()); } private String formatTimestamp(long timeInMillis) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index e7b72a93f3378..c35a37d48d561 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -40,6 +40,7 @@ import java.util.List; import java.util.Random; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; @@ -74,16 +75,11 @@ public class TestLocalFileSystem { private Configuration conf; private LocalFileSystem fileSys; - /** - * standard test timeout: {@value}. - */ - public static final int DEFAULT_TEST_TIMEOUT = 60 * 1000; - /** * Set the timeout for every test. */ @Rule - public Timeout testTimeout = new Timeout(DEFAULT_TEST_TIMEOUT); + public Timeout testTimeout = new Timeout(60, TimeUnit.SECONDS); private void cleanupFile(FileSystem fs, Path name) throws IOException { assertTrue(fs.exists(name)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java index 81756f9305f21..8e48035d7bd85 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystemPermission.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; +import org.assertj.core.api.Assertions; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -32,8 +33,6 @@ import java.util.StringTokenizer; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.*; /** @@ -245,9 +244,9 @@ public void testSetUmaskInRealTime() throws Exception { assertTrue(localfs.mkdirs(dir2)); FsPermission finalPermission = localfs.getFileStatus(dir2) .getPermission(); - assertThat("With umask 062 permission should not be 755 since the " + - "default permission is 777", new FsPermission("755"), - is(not(finalPermission))); + Assertions.assertThat(new FsPermission("755")).as( + "With umask 062 permission should not be 755 since the " + + "default permission is 777").isNotEqualTo(finalPermission); assertEquals( 
"With umask 062 we expect 715 since the default permission is 777", new FsPermission("715"), finalPermission); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java index ac9de6d7bfe8c..7b32f28507cb7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java @@ -27,7 +27,7 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; -import org.junit.internal.AssumptionViolatedException; +import org.junit.AssumptionViolatedException; import org.junit.rules.TestName; import org.junit.rules.Timeout; import org.slf4j.Logger; @@ -35,6 +35,7 @@ import java.io.IOException; import java.net.URI; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -164,7 +165,8 @@ protected Configuration createConfiguration() { * Set the timeout for every test. */ @Rule - public Timeout testTimeout = new Timeout(getTestTimeoutMillis()); + public Timeout testTimeout = + new Timeout(getTestTimeoutMillis(), TimeUnit.MILLISECONDS); /** * Option for tests to override the default timeout value. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java index 8155d8e2b2ba1..48cdfdd32ecff 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java @@ -22,6 +22,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.Comparator; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.net.ftp.FTP; @@ -57,7 +58,7 @@ public class TestFTPFileSystem { private FtpTestServer server; private java.nio.file.Path testDir; @Rule - public Timeout testTimeout = new Timeout(180000); + public Timeout testTimeout = new Timeout(180000, TimeUnit.MILLISECONDS); @Before public void setUp() throws Exception { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java index 58452f86f5999..e8ba5f211eb8d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java @@ -44,14 +44,15 @@ import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider; import org.apache.sshd.server.session.ServerSession; import org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory; -import static org.hamcrest.core.Is.is; + import org.junit.After; import org.junit.AfterClass; + +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import 
static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.BeforeClass; @@ -193,8 +194,9 @@ public void testCreateFile() throws Exception { assertTrue(localFs.exists(file)); assertTrue(sftpFs.delete(file, false)); assertFalse(localFs.exists(file)); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -210,8 +212,9 @@ public void testFileExists() throws Exception { assertTrue(sftpFs.delete(file, false)); assertFalse(sftpFs.exists(file)); assertFalse(localFs.exists(file)); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -235,8 +238,9 @@ public void testReadFile() throws Exception { } } assertTrue(sftpFs.delete(file, false)); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -258,8 +262,9 @@ public void testStatFile() throws Exception { assertEquals(data.length, sstat.getLen()); assertEquals(lstat.getLen(), sstat.getLen()); assertTrue(sftpFs.delete(file, false)); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -271,8 +276,9 @@ public void testStatFile() throws Exception { public void testDeleteNonEmptyDir() throws Exception { Path file = touch(localFs, name.getMethodName().toLowerCase()); sftpFs.delete(localDir, false); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -284,8 +290,9 @@ public void testDeleteNonEmptyDir() throws Exception { public void testDeleteNonExistFile() throws Exception { Path file = new Path(localDir, name.getMethodName().toLowerCase()); assertFalse(sftpFs.delete(file, false)); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -308,8 +315,9 @@ public void testRenameFile() throws Exception { assertFalse(localFs.exists(file1)); assertTrue(sftpFs.delete(file2, false)); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } /** @@ -347,8 +355,9 @@ public void testGetAccessTime() throws IOException { accessTime1 = (accessTime1 / 1000) * 1000; long accessTime2 = sftpFs.getFileStatus(file).getAccessTime(); assertEquals(accessTime1, accessTime2); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } @Test @@ -360,8 +369,9 @@ public void testGetModifyTime() throws IOException { modifyTime1 = (modifyTime1 / 1000) * 1000; long modifyTime2 = sftpFs.getFileStatus(file).getModificationTime(); assertEquals(modifyTime1, modifyTime2); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) 
sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } @Test @@ -371,17 +381,18 @@ public void testMkDirs() throws IOException { sftpFs.mkdirs(path); assertTrue(localFs.exists(path)); assertTrue(localFs.getFileStatus(path).isDirectory()); - assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), - is(1)); + assertThat( + ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount()) + .isEqualTo(1); } @Test public void testCloseFileSystemClosesConnectionPool() throws Exception { SFTPFileSystem fs = (SFTPFileSystem) sftpFs; fs.getHomeDirectory(); - assertThat(fs.getConnectionPool().getLiveConnCount(), is(1)); + assertThat(fs.getConnectionPool().getLiveConnCount()).isEqualTo(1); fs.close(); - assertThat(fs.getConnectionPool().getLiveConnCount(), is(0)); + assertThat(fs.getConnectionPool().getLiveConnCount()).isEqualTo(0); ///making sure that re-entrant close calls are safe fs.close(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java index 91bfdd6d3948c..bb325b4832c10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java @@ -18,18 +18,18 @@ package org.apache.hadoop.fs.shell; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; - import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** * Test {@code PrintableString} class. */ public class TestPrintableString { private void expect(String reason, String raw, String expected) { - assertThat(reason, new PrintableString(raw).toString(), is(expected)); + assertThat(new PrintableString(raw).toString()).as(reason) + .isEqualTo(expected); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java index bb5ca4ca1c599..9111062ef00a3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestAnd.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.Deque; import java.util.LinkedList; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.shell.PathData; import org.junit.Rule; @@ -33,7 +34,7 @@ public class TestAnd { @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); // test all expressions passing @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java index 7ad0574e183c6..b03be79b03165 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFilterExpression.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.Deque; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.shell.PathData; @@ -35,7 +36,7 @@ public class TestFilterExpression { private FilterExpression 
test; @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); @Before public void setup() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java index de0e512618b4e..959dc59a270b8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestFind.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.LinkedList; import java.util.NoSuchElementException; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -50,7 +51,7 @@ public class TestFind { @Rule - public Timeout timeout = new Timeout(10000); + public Timeout timeout = new Timeout(10000, TimeUnit.MILLISECONDS); private static FileSystem mockFs; private static Configuration conf; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java index c204322f1e935..f6eafd77b5d2e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestIname.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.shell.find.TestHelper.*; import java.io.IOException; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.shell.PathData; @@ -34,7 +35,7 @@ public class TestIname { private Name.Iname name; @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); @Before public void resetMock() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java index 81a405f4cfd4b..8217655b523bb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestName.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.shell.find.TestHelper.*; import java.io.IOException; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.shell.PathData; @@ -34,7 +35,7 @@ public class TestName { private Name name; @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); @Before public void resetMock() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java index a5cacc7defb79..5e861fc35f085 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.shell.PathData; import java.io.PrintStream; +import 
java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileSystem; import org.junit.Before; @@ -36,7 +37,7 @@ public class TestPrint { private FileSystem mockFs; @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); @Before public void resetMock() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java index 20c9bd69470f7..94c5c403bec38 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestPrint0.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.shell.PathData; import java.io.PrintStream; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileSystem; import org.junit.Before; @@ -36,7 +37,7 @@ public class TestPrint0 { private FileSystem mockFs; @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); @Before public void resetMock() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java index 999ff598d771b..058a0923a43a5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/find/TestResult.java @@ -23,10 +23,12 @@ import org.junit.rules.Timeout; import org.junit.Test; +import java.util.concurrent.TimeUnit; + public class TestResult { @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); // test the PASS value @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 05d7974395013..037ea798c9caa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -60,6 +60,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -74,8 +75,6 @@ import org.junit.Test; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.*; /** @@ -486,11 +485,13 @@ void compareBLs(BlockLocation[] viewBL, BlockLocation[] targetBL) { Assert.assertEquals(targetBL.length, viewBL.length); int i = 0; for (BlockLocation vbl : viewBL) { - assertThat(vbl.toString(), equalTo(targetBL[i].toString())); - assertThat(vbl.getOffset(), equalTo(targetBL[i].getOffset())); - assertThat(vbl.getLength(), equalTo(targetBL[i].getLength())); + Assertions.assertThat(vbl.toString()).isEqualTo(targetBL[i].toString()); + 
Assertions.assertThat(vbl.getOffset()) + .isEqualTo(targetBL[i].getOffset()); + Assertions.assertThat(vbl.getLength()) + .isEqualTo(targetBL[i].getLength()); i++; - } + } } @Test @@ -1025,7 +1026,7 @@ public void testConfLinkSlash() throws Exception { if (e instanceof UnsupportedFileSystemException) { String msg = " Use " + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH + " instead"; - assertThat(e.getMessage(), containsString(msg)); + GenericTestUtils.assertExceptionContains(msg, e); } else { fail("Unexpected exception: " + e.getMessage()); } @@ -1262,8 +1263,7 @@ public void testLinkTarget() throws Exception { fail("Resolving link target for a ViewFs mount link should fail!"); } catch (Exception e) { LOG.info("Expected exception: " + e); - assertThat(e.getMessage(), - containsString("not a symbolic link")); + GenericTestUtils.assertExceptionContains("not a symbolic link", e); } try { @@ -1272,8 +1272,7 @@ public void testLinkTarget() throws Exception { fail("Resolving link target for a non sym link should fail!"); } catch (Exception e) { LOG.info("Expected exception: " + e); - assertThat(e.getMessage(), - containsString("not a symbolic link")); + GenericTestUtils.assertExceptionContains("not a symbolic link", e); } try { @@ -1281,8 +1280,7 @@ public void testLinkTarget() throws Exception { fail("Resolving link target for a non existing link should fail!"); } catch (Exception e) { LOG.info("Expected exception: " + e); - assertThat(e.getMessage(), - containsString("File does not exist:")); + GenericTestUtils.assertExceptionContains("File does not exist:", e); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java index d4f548c88b9d9..d90702380178e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java @@ -22,6 +22,7 @@ import java.net.InetSocketAddress; import java.security.NoSuchAlgorithmException; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; @@ -56,7 +57,7 @@ public class TestZKFailoverController extends ClientBaseWithFixes { * Set the timeout for every test */ @Rule - public Timeout testTimeout = new Timeout(3 * 60 * 1000); + public Timeout testTimeout = new Timeout(3, TimeUnit.MINUTES); // Set up ZK digest-based credentials for the purposes of the tests, // to make sure all of our functionality works with auth and ACLs diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index e3cb028f5f553..b1255d19d9086 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -38,9 +38,7 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -81,9 +79,6 @@ public class TestHttpServer extends HttpServerFunctionalTest { private static HttpServer2 server; 
private static final int MAX_THREADS = 10; - @Rule - public ExpectedException exception = ExpectedException.none(); - @SuppressWarnings("serial") public static class EchoMapServlet extends HttpServlet { @SuppressWarnings("unchecked") @@ -368,11 +363,11 @@ private HttpURLConnection getHttpURLConnection(HttpServer2 httpServer) } @Test - public void testHttpResonseInvalidValueType() throws Exception { + public void testHttpResonseInvalidValueType() { Configuration conf = new Configuration(); boolean xFrameEnabled = true; - exception.expect(IllegalArgumentException.class); - createServer(xFrameEnabled, "Hadoop", conf); + assertThrows(IllegalArgumentException.class, () -> + createServer(xFrameEnabled, "Hadoop", conf)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestDecompressorStream.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestDecompressorStream.java index 5a41e7ffb2f1f..1e9f59b7a51ee 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestDecompressorStream.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestDecompressorStream.java @@ -17,8 +17,7 @@ */ package org.apache.hadoop.io.compress; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -48,7 +47,7 @@ public void setUp() throws IOException { @Test public void testReadOneByte() throws IOException { for (int i = 0; i < TEST_STRING.length(); ++i) { - assertThat(decompressorStream.read(), is((int) TEST_STRING.charAt(i))); + assertThat(decompressorStream.read()).isEqualTo(TEST_STRING.charAt(i)); } try { int ret = decompressorStream.read(); @@ -68,8 +67,8 @@ public void testReadBuffer() throws IOException { int n = Math.min(bytesToRead, buf.length); int bytesRead = decompressorStream.read(buf, 0, n); assertTrue(bytesRead > 0 && bytesRead <= n); - assertThat(new String(buf, 0, bytesRead), - is(TEST_STRING.substring(i, i + bytesRead))); + assertThat(new String(buf, 0, bytesRead)) + .isEqualTo(TEST_STRING.substring(i, i + bytesRead)); bytesToRead = bytesToRead - bytesRead; i = i + bytesRead; } @@ -83,12 +82,12 @@ public void testReadBuffer() throws IOException { @Test public void testSkip() throws IOException { - assertThat(decompressorStream.skip(12), is(12L)); - assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(12))); - assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(13))); - assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(14))); - assertThat(decompressorStream.skip(10), is(10L)); - assertThat(decompressorStream.read(), is((int)TEST_STRING.charAt(25))); + assertThat(decompressorStream.skip(12)).isEqualTo(12L); + assertThat(decompressorStream.read()).isEqualTo(TEST_STRING.charAt(12)); + assertThat(decompressorStream.read()).isEqualTo(TEST_STRING.charAt(13)); + assertThat(decompressorStream.read()).isEqualTo(TEST_STRING.charAt(14)); + assertThat(decompressorStream.skip(10)).isEqualTo(10L); + assertThat(decompressorStream.read()).isEqualTo(TEST_STRING.charAt(25)); try { long ret = decompressorStream.skip(1000); fail("Not reachable but got ret " + ret); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java index ae03835571fc1..2a3c590ae2339 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java @@ -27,11 +27,12 @@ import java.util.HashMap; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.TimeUnit; public class TestECSchema { @Rule - public Timeout globalTimeout = new Timeout(300000); + public Timeout globalTimeout = new Timeout(300000, TimeUnit.MILLISECONDS); @Test public void testGoodSchema() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java index 19054cecff013..726d2c1284e60 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java @@ -25,12 +25,14 @@ import org.junit.Test; import org.junit.rules.Timeout; +import java.util.concurrent.TimeUnit; + /** * Test Reed-Solomon encoding and decoding. */ public class TestRSErasureCoder extends TestErasureCoderBase { @Rule - public Timeout globalTimeout = new Timeout(300000); + public Timeout globalTimeout = new Timeout(300000, TimeUnit.MILLISECONDS); @Before public void setup() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestXORCoder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestXORCoder.java index a44de1e08984c..d1ceec8121acd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestXORCoder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestXORCoder.java @@ -22,13 +22,15 @@ import org.junit.Test; import org.junit.rules.Timeout; +import java.util.concurrent.TimeUnit; + /** * Test XOR encoding and decoding. */ public class TestXORCoder extends TestErasureCoderBase { @Rule - public Timeout globalTimeout = new Timeout(300000); + public Timeout globalTimeout = new Timeout(300000, TimeUnit.MILLISECONDS); @Before public void setup() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestDefaultRetryPolicy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestDefaultRetryPolicy.java index 6b82077e8df91..1a934f4ed86ed 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestDefaultRetryPolicy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestDefaultRetryPolicy.java @@ -27,16 +27,16 @@ import org.junit.rules.Timeout; import java.io.IOException; +import java.util.concurrent.TimeUnit; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** * Test the behavior of the default retry policy. */ public class TestDefaultRetryPolicy { @Rule - public Timeout timeout = new Timeout(30000); + public Timeout timeout = new Timeout(30000, TimeUnit.MILLISECONDS); /** Verify FAIL < RETRY < FAILOVER_AND_RETRY. 
*/ @Test @@ -65,8 +65,8 @@ public void testWithRetriable() throws Exception { null); RetryPolicy.RetryAction action = policy.shouldRetry( new RetriableException("Dummy exception"), 0, 0, true); - assertThat(action.action, - is(RetryPolicy.RetryAction.RetryDecision.RETRY)); + assertThat(action.action) + .isEqualTo(RetryPolicy.RetryAction.RetryDecision.RETRY); } /** @@ -87,8 +87,8 @@ public void testWithWrappedRetriable() throws Exception { RetryPolicy.RetryAction action = policy.shouldRetry( new RemoteException(RetriableException.class.getName(), "Dummy exception"), 0, 0, true); - assertThat(action.action, - is(RetryPolicy.RetryAction.RetryDecision.RETRY)); + assertThat(action.action) + .isEqualTo(RetryPolicy.RetryAction.RetryDecision.RETRY); } /** @@ -107,7 +107,7 @@ public void testWithRetriableAndRetryDisabled() throws Exception { null); RetryPolicy.RetryAction action = policy.shouldRetry( new RetriableException("Dummy exception"), 0, 0, true); - assertThat(action.action, - is(RetryPolicy.RetryAction.RetryDecision.FAIL)); + assertThat(action.action).isEqualTo( + RetryPolicy.RetryAction.RetryDecision.FAIL); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java index ea86fc14c7978..5eca1296994cc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java @@ -40,6 +40,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*; import static org.apache.hadoop.metrics2.impl.MsInfo.*; @@ -47,7 +48,7 @@ public class TestJvmMetrics { @Rule - public Timeout timeout = new Timeout(30000); + public Timeout timeout = new Timeout(30000, TimeUnit.MILLISECONDS); private JvmPauseMonitor pauseMonitor; private GcTimeMonitor gcTimeMonitor; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java index 3aa0acdcf4c45..c114c356516f3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java @@ -30,13 +30,12 @@ import org.apache.hadoop.util.Time; +import org.assertj.core.api.Assertions; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; /** @@ -104,7 +103,7 @@ private InetAddress getLocalIPAddr() throws UnknownHostException { @Test public void testNullInterface() throws Exception { String host = DNS.getDefaultHost(null); // should work. 
- assertThat(host, is(DNS.getDefaultHost(DEFAULT))); + Assertions.assertThat(host).isEqualTo(DNS.getDefaultHost(DEFAULT)); try { String ip = DNS.getDefaultIP(null); fail("Expected a NullPointerException, got " + ip); @@ -120,7 +119,8 @@ public void testNullInterface() throws Exception { @Test public void testNullDnsServer() throws Exception { String host = DNS.getDefaultHost(getLoopbackInterface(), null); - assertThat(host, is(DNS.getDefaultHost(getLoopbackInterface()))); + Assertions.assertThat(host) + .isEqualTo(DNS.getDefaultHost(getLoopbackInterface())); } /** @@ -130,7 +130,8 @@ public void testNullDnsServer() throws Exception { @Test public void testDefaultDnsServer() throws Exception { String host = DNS.getDefaultHost(getLoopbackInterface(), DEFAULT); - assertThat(host, is(DNS.getDefaultHost(getLoopbackInterface()))); + Assertions.assertThat(host) + .isEqualTo(DNS.getDefaultHost(getLoopbackInterface())); } /** @@ -204,7 +205,7 @@ public void testLookupWithHostsFallback() throws Exception { getLoopbackInterface(), INVALID_DNS_SERVER, true); // Expect to get back something other than the cached host name. - assertThat(hostname, not(DUMMY_HOSTNAME)); + Assertions.assertThat(hostname).isNotEqualTo(DUMMY_HOSTNAME); } finally { // Restore DNS#cachedHostname for subsequent tests. changeDnsCachedHostname(oldHostname); @@ -227,7 +228,7 @@ public void testLookupWithoutHostsFallback() throws Exception { // Expect to get back the cached host name since there was no hosts // file lookup. - assertThat(hostname, is(DUMMY_HOSTNAME)); + Assertions.assertThat(hostname).isEqualTo(DUMMY_HOSTNAME); } finally { // Restore DNS#cachedHostname for subsequent tests. changeDnsCachedHostname(oldHostname); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestMockDomainNameResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestMockDomainNameResolver.java index 5d8f014c72b84..21c6c7279fb81 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestMockDomainNameResolver.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestMockDomainNameResolver.java @@ -20,15 +20,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; /** * This class mainly test the MockDomainNameResolver comes working as expected. 
@@ -37,9 +36,6 @@ public class TestMockDomainNameResolver { private Configuration conf; - @Rule - public final ExpectedException exception = ExpectedException.none(); - @Before public void setup() { conf = new Configuration(); @@ -60,12 +56,10 @@ public void testMockDomainNameResolverCanBeCreated() throws IOException { } @Test - public void testMockDomainNameResolverCanNotBeCreated() - throws UnknownHostException { + public void testMockDomainNameResolverCanNotBeCreated() { DomainNameResolver resolver = DomainNameResolverFactory.newInstance( conf, CommonConfigurationKeys.HADOOP_DOMAINNAME_RESOLVER_IMPL); - exception.expect(UnknownHostException.class); - resolver.getAllByDomainName( - MockDomainNameResolver.UNKNOW_DOMAIN); + assertThrows(UnknownHostException.class, () -> + resolver.getAllByDomainName(MockDomainNameResolver.UNKNOW_DOMAIN)); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiag.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiag.java index e395566dae739..297d240d30075 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiag.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiag.java @@ -37,6 +37,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.util.Properties; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; import static org.apache.hadoop.security.KDiag.*; @@ -52,7 +53,7 @@ public class TestKDiag extends Assert { public TestName methodName = new TestName(); @Rule - public Timeout testTimeout = new Timeout(30000); + public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS); @BeforeClass public static void nameThread() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java index dbc40c52e5137..2e266bba1f97a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestKDiagNoKDC.java @@ -33,6 +33,7 @@ import java.io.File; import java.util.Properties; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; @@ -58,7 +59,7 @@ public class TestKDiagNoKDC extends Assert { public TestName methodName = new TestName(); @Rule - public Timeout testTimeout = new Timeout(30000); + public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS); @BeforeClass public static void nameThread() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java index f7bb8ec4a964b..fb17977aa2e70 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java @@ -36,13 +36,13 @@ import org.junit.Before; import org.junit.Rule; import org.junit.Test; -import 
org.junit.rules.ExpectedException; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; public class TestCredentialProviderFactory { @@ -52,9 +52,6 @@ public class TestCredentialProviderFactory { @Rule public final TestName test = new TestName(); - @Rule - public ExpectedException exception = ExpectedException.none(); - @Before public void announce() { LOG.info("Running test " + test.getMethodName()); @@ -250,18 +247,15 @@ public void testLocalJksProvider() throws Exception { } @Test - public void testLocalBCFKSProvider() throws Exception { + public void testLocalBCFKSProvider() { Configuration conf = new Configuration(); final Path ksPath = new Path(tmpDir.toString(), "test.bcfks"); final String ourUrl = LocalBouncyCastleFipsKeyStoreProvider.SCHEME_NAME + "://file" + ksPath.toUri(); conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl); - - exception.expect(IOException.class); - exception.expectMessage("Can't create keystore"); - List providers = - CredentialProviderFactory.getProviders(conf); - assertTrue("BCFKS needs additional JDK setup", providers.isEmpty()); + Exception exception = assertThrows(IOException.class, + () -> CredentialProviderFactory.getProviders(conf)); + assertEquals("Can't create keystore", exception.getMessage()); } public void checkPermissionRetention(Configuration conf, String ourUrl, @@ -290,4 +284,3 @@ public void checkPermissionRetention(Configuration conf, String ourUrl, "keystore.", "rwxrwxrwx", s.getPermission().toString()); } } - diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java index ef86697ab14ba..9c9618ce5b3cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java @@ -29,6 +29,8 @@ import org.junit.rules.Timeout; import org.mockito.Mockito; +import java.util.concurrent.TimeUnit; + /** * Test class for @DefaultImpersonationProvider */ @@ -43,7 +45,7 @@ public class TestDefaultImpersonationProvider { .mock(UserGroupInformation.class); private Configuration conf; @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); @Before public void setup() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index 53973055336f2..928768d601afa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -24,6 +24,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import 
org.apache.curator.RetryPolicy; import org.apache.curator.framework.CuratorFramework; @@ -70,7 +71,7 @@ public class TestZKDelegationTokenSecretManager { protected TestingServer zkServer; @Rule - public Timeout globalTimeout = new Timeout(300000); + public Timeout globalTimeout = new Timeout(300000, TimeUnit.MILLISECONDS); @Before public void setup() throws Exception { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java index 0f8f1e45c9005..bc140fa7b1075 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java @@ -47,6 +47,7 @@ import java.io.StringWriter; import java.util.Map; import java.util.Properties; +import java.util.concurrent.TimeUnit; public class TestDelegationTokenAuthenticationHandlerWithMocks { @@ -93,7 +94,7 @@ public AuthenticationToken authenticate(HttpServletRequest request, private DelegationTokenAuthenticationHandler handler; @Rule - public Timeout testTimeout = new Timeout(120000); + public Timeout testTimeout = new Timeout(120000, TimeUnit.MILLISECONDS); @Before public void setUp() throws Exception { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java index 9794c5499802d..b7b86b7aa0dc0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java @@ -30,8 +30,7 @@ import java.io.PrintWriter; import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs; -import static org.hamcrest.CoreMatchers.containsString; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -56,8 +55,8 @@ public void testStopQuietlyWhenServiceStopThrowsException() throws Exception { ServiceOperations.stopQuietly(logger, service); - assertThat(logCapturer.getOutput(), - containsString("When stopping the service " + service.getName())); + assertThat(logCapturer.getOutput()) + .contains("When stopping the service " + service.getName()); verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java index d7c86316ef62c..4be670d4638d1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java @@ -41,6 +41,7 @@ import java.io.OutputStream; import java.util.Arrays; import java.util.List; +import 
java.util.concurrent.TimeUnit; public class AbstractServiceLauncherTestBase extends Assert implements LauncherExitCodes { @@ -57,7 +58,7 @@ public class AbstractServiceLauncherTestBase extends Assert implements * All tests have a short life. */ @Rule - public Timeout testTimeout = new Timeout(15000); + public Timeout testTimeout = new Timeout(15000, TimeUnit.MILLISECONDS); /** * Rule to provide the method name. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java index 4e83162502367..653cb38c49a63 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/PlatformAssumptions.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.test; -import org.junit.internal.AssumptionViolatedException; +import org.junit.AssumptionViolatedException; /** * JUnit assumptions for the environment (OS). diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/UnitTestcaseTimeLimit.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/UnitTestcaseTimeLimit.java index e992fea0f341a..722d0072439a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/UnitTestcaseTimeLimit.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/UnitTestcaseTimeLimit.java @@ -21,6 +21,8 @@ import org.junit.rules.TestRule; import org.junit.rules.Timeout; +import java.util.concurrent.TimeUnit; + /** * Class for test units to extend in order that their individual tests will * be timed out and fail automatically should they run more than 10 seconds. 
@@ -30,5 +32,6 @@ public class UnitTestcaseTimeLimit { public final int timeOutSecs = 10; - @Rule public TestRule globalTimeout = new Timeout(timeOutSecs * 1000); + @Rule public TestRule globalTimeout = + new Timeout(timeOutSecs, TimeUnit.SECONDS); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcComposer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcComposer.java index f08702e35e596..5d8dcfb1be756 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcComposer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcComposer.java @@ -21,6 +21,7 @@ import java.io.DataInputStream; import java.io.IOException; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Before; @@ -35,7 +36,7 @@ */ public class TestCrcComposer { @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); private Random rand = new Random(1234); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcUtil.java index a98cb8a6757d3..b4355b1513f48 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCrcUtil.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Rule; @@ -32,7 +33,7 @@ */ public class TestCrcUtil { @Rule - public Timeout globalTimeout = new Timeout(10000); + public Timeout globalTimeout = new Timeout(10000, TimeUnit.MILLISECONDS); private Random rand = new Random(1234); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java index 94462732a5d60..082672ccd33d2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java @@ -33,6 +33,7 @@ import java.nio.file.Files; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static org.junit.Assert.assertTrue; @@ -44,8 +45,8 @@ */ public final class TestDiskCheckerWithDiskIo { @Rule - public Timeout testTimeout = new Timeout(30_000); - + public Timeout testTimeout = new Timeout(30_000, TimeUnit.MILLISECONDS); + /** * Verify DiskChecker ignores at least 2 transient file creation errors. 
*/ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCrc32.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCrc32.java index ecc6c906ab90d..b2d9e7420649e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCrc32.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCrc32.java @@ -28,9 +28,7 @@ import org.apache.hadoop.fs.ChecksumException; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; @@ -51,9 +49,6 @@ public class TestNativeCrc32 { private ByteBuffer data, checksums; private DataChecksum checksum; - @Rule - public ExpectedException exception = ExpectedException.none(); - @Parameters public static Collection data() { Collection params = new ArrayList(2); @@ -88,12 +83,12 @@ public void testVerifyChunkedSumsSuccess() throws ChecksumException { } @Test - public void testVerifyChunkedSumsFail() throws ChecksumException { + public void testVerifyChunkedSumsFail() { allocateDirectByteBuffers(); fillDataAndInvalidChecksums(); - exception.expect(ChecksumException.class); - NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id, - checksums, data, fileName, BASE_POSITION); + assertThrows(ChecksumException.class, + () -> NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id, + checksums, data, fileName, BASE_POSITION)); } @Test @@ -122,13 +117,14 @@ public void testVerifyChunkedSumsByteArraySuccess() throws ChecksumException { } @Test - public void testVerifyChunkedSumsByteArrayFail() throws ChecksumException { + public void testVerifyChunkedSumsByteArrayFail() { allocateArrayByteBuffers(); fillDataAndInvalidChecksums(); - exception.expect(ChecksumException.class); - NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, checksumType.id, - checksums.array(), checksums.position(), data.array(), data.position(), - data.remaining(), fileName, BASE_POSITION); + assertThrows(ChecksumException.class, + () -> NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, + checksumType.id, checksums.array(), checksums.position(), + data.array(), data.position(), data.remaining(), fileName, + BASE_POSITION)); } @Test @@ -177,13 +173,13 @@ public void testNativeVerifyChunkedSumsSuccess() throws ChecksumException { @Test @SuppressWarnings("deprecation") - public void testNativeVerifyChunkedSumsFail() throws ChecksumException { + public void testNativeVerifyChunkedSumsFail() { allocateDirectByteBuffers(); fillDataAndInvalidChecksums(); - exception.expect(ChecksumException.class); - NativeCrc32.nativeVerifyChunkedSums(bytesPerChecksum, checksumType.id, - checksums, checksums.position(), data, data.position(), data.remaining(), - fileName, BASE_POSITION); + assertThrows(ChecksumException.class, + () -> NativeCrc32.nativeVerifyChunkedSums(bytesPerChecksum, + checksumType.id, checksums, checksums.position(), data, + data.position(), data.remaining(), fileName, BASE_POSITION)); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index 03c67c8263938..1d1ce893a97a2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -25,11 +25,11 @@ import java.util.HashMap; import java.util.List; -import static org.hamcrest.CoreMatchers.containsString; import static org.junit.Assert.*; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; @@ -165,8 +165,8 @@ public void testLogThreadInfo() throws Exception { final String title = "title"; ReflectionUtils.logThreadInfo(logger, title, 0L); - assertThat(logCapturer.getOutput(), - containsString("Process Thread Dump: " + title)); + Assertions.assertThat(logCapturer.getOutput()) + .contains("Process Thread Dump: " + title); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java index c9f398da563e2..6b7154b83b523 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.util; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import org.apache.commons.io.FileUtils; import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider; @@ -53,7 +54,7 @@ public class TestShell extends Assert { * Set the timeout for every test */ @Rule - public Timeout testTimeout = new Timeout(30000); + public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS); @Rule public TestName methodName = new TestName(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java index baf4251c3e50e..00e36ee8fa941 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java @@ -20,7 +20,6 @@ import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows; import static org.junit.Assert.*; -import static org.junit.matchers.JUnitMatchers.containsString; import java.io.File; import java.io.FileInputStream; @@ -31,14 +30,13 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; +import org.assertj.core.api.Assertions; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.hamcrest.CoreMatchers.*; - /** * Test cases for helper Windows winutils.exe utility. */ @@ -496,12 +494,12 @@ public void testReadLink() throws IOException { String readLinkOutput = Shell.execCommand(winutils, "readlink", dirLink.toString()); - assertThat(readLinkOutput, equalTo(dir1.toString())); + Assertions.assertThat(readLinkOutput).isEqualTo(dir1.toString()); readLinkOutput = Shell.execCommand(winutils, "readlink", fileLink.toString()); - assertThat(readLinkOutput, equalTo(file1.toString())); + Assertions.assertThat(readLinkOutput).isEqualTo(file1.toString()); // Try a few invalid inputs and verify we get an ExitCodeException for each. 
// @@ -511,7 +509,7 @@ public void testReadLink() throws IOException { Shell.execCommand(winutils, "readlink", ""); fail("Failed to get Shell.ExitCodeException when reading bad symlink"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1); } try { @@ -520,7 +518,7 @@ public void testReadLink() throws IOException { Shell.execCommand(winutils, "readlink", "ThereIsNoSuchLink"); fail("Failed to get Shell.ExitCodeException when reading bad symlink"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1); } try { @@ -529,7 +527,7 @@ public void testReadLink() throws IOException { Shell.execCommand(winutils, "readlink", dir1.toString()); fail("Failed to get Shell.ExitCodeException when reading bad symlink"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1); } try { @@ -538,7 +536,7 @@ public void testReadLink() throws IOException { Shell.execCommand(winutils, "readlink", file1.toString()); fail("Failed to get Shell.ExitCodeException when reading bad symlink"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1); } try { @@ -547,11 +545,10 @@ public void testReadLink() throws IOException { Shell.execCommand(winutils, "readlink", "a", "b"); fail("Failed to get Shell.ExitCodeException with bad parameters"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1); } } - @SuppressWarnings("deprecation") @Test(timeout=10000) public void testTaskCreate() throws IOException { requireWinutils(); @@ -570,8 +567,8 @@ public void testTaskCreate() throws IOException { assertTrue(proof.exists()); String outNumber = FileUtils.readFileToString(proof); - - assertThat(outNumber, containsString(testNumber)); + + Assertions.assertThat(outNumber).contains(testNumber); } @Test (timeout = 30000) @@ -604,7 +601,7 @@ public void testTaskCreateWithLimits() throws IOException { + jobId, "java -Xmx256m -version"); fail("Failed to get Shell.ExitCodeException with insufficient memory"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1); } // Run tasks with wrong parameters @@ -615,7 +612,7 @@ public void testTaskCreateWithLimits() throws IOException { "-1", "foo", "job" + jobId, "cmd /c echo job" + jobId); fail("Failed to get Shell.ExitCodeException with bad parameters"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1639)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1639); } try { @@ -624,7 +621,7 @@ public void testTaskCreateWithLimits() throws IOException { "job" + jobId, "cmd /c echo job" + jobId); fail("Failed to get Shell.ExitCodeException with bad parameters"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1639)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1639); } try { @@ -633,7 +630,7 @@ public void testTaskCreateWithLimits() throws IOException { "job" + jobId, "cmd /c echo job" + jobId); fail("Failed to get Shell.ExitCodeException with bad parameters"); } catch (Shell.ExitCodeException ece) { - assertThat(ece.getExitCode(), is(1639)); + Assertions.assertThat(ece.getExitCode()).isEqualTo(1639); } } } From 23eca6d0e39e0010fd691ca430a949091ba7d1a1 
Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 13 May 2021 20:30:00 +0800 Subject: [PATCH 314/666] HADOOP-16822. Provide source artifacts for hadoop-client-api. Contributed by Karel Kolman. --- hadoop-client-modules/hadoop-client-api/pom.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml index e8ac19e4f3c79..1a837434279a1 100644 --- a/hadoop-client-modules/hadoop-client-api/pom.xml +++ b/hadoop-client-modules/hadoop-client-api/pom.xml @@ -87,6 +87,10 @@ org.apache.maven.plugins maven-shade-plugin + <configuration> + <createSourcesJar>true</createSourcesJar> + <shadeSourcesContent>true</shadeSourcesContent> + </configuration> org.apache.hadoop From 163912fa7dbe271f272af78f9de3d5c9a9e08ce7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 May 2021 05:51:43 -0700 Subject: [PATCH 315/666] Bump acorn (#3003) Bumps [acorn](https://github.com/acornjs/acorn) from 5.7.3 to 5.7.4. - [Release notes](https://github.com/acornjs/acorn/releases) - [Commits](https://github.com/acornjs/acorn/compare/5.7.3...5.7.4) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Reviewed-by: Gergely Pollak --- .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index 1d36e689da053..026b2d73b5f64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -26,9 +26,9 @@ accepts@~1.3.5, accepts@~1.3.7: negotiator "0.6.2" acorn@^5.2.1: - version "5.7.3" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.3.tgz#67aa231bf8812974b85235a96771eb6bd07ea279" - integrity sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw== + version "5.7.4" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.4.tgz#3e8d8a9947d0599a1796d10225d7432f4a4acf5e" + integrity sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg== after@0.8.1: version "0.8.1" From f4218f1cfb47371cadcd30174e6442612cfea4ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 May 2021 05:54:07 -0700 Subject: [PATCH 316/666] Bump underscore (#2997) Bumps [underscore](https://github.com/jashkenas/underscore) from 1.9.1 to 1.13.1.
- [Release notes](https://github.com/jashkenas/underscore/releases) - [Commits](https://github.com/jashkenas/underscore/compare/1.9.1...1.13.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Reviewed-by: Gergely Pollak --- .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index 026b2d73b5f64..8f5a4f6e7c4d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -6628,9 +6628,9 @@ underscore.string@~3.3.4: util-deprecate "^1.0.2" underscore@>=1.8.3: - version "1.9.1" - resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.9.1.tgz#06dce34a0e68a7babc29b365b8e74b8925203961" - integrity sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg== + version "1.13.1" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.13.1.tgz#0c1c6bd2df54b6b69f2314066d65b6cde6fcf9d1" + integrity sha512-hzSoAVtJF+3ZtiFX0VgfFPHEDRm7Y/QPjGyNo4TVdnDTdft3tr8hEkD25a1jC+TjTuE7tkHGKkhwCgs9dgBB2g== universalify@^0.1.0: version "0.1.2" From 868ad41fdf3218c3d450fa4a85bbfae912408ab0 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Thu, 13 May 2021 21:58:27 +0530 Subject: [PATCH 317/666] HADOOP-17693. Dockerfile for building on Centos 8 (#3006) --- dev-support/docker/Dockerfile_centos_8 | 173 +++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 dev-support/docker/Dockerfile_centos_8 diff --git a/dev-support/docker/Dockerfile_centos_8 b/dev-support/docker/Dockerfile_centos_8 new file mode 100644 index 0000000000000..ddf60b7fc049c --- /dev/null +++ b/dev-support/docker/Dockerfile_centos_8 @@ -0,0 +1,173 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for installing the necessary dependencies for building Hadoop. +# See BUILDING.txt. 
+ +FROM centos:8 + +WORKDIR /root + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN yum update -y \ + && yum install -y \ + ant \ + bzip2 \ + bzip2-devel \ + clang \ + curl \ + cyrus-sasl-devel \ + dnf \ + fuse \ + git \ + libcurl-devel \ + fuse \ + fuse-libs \ + fuse-devel \ + libtool \ + libtirpc-devel \ + lz4-devel \ + make \ + openssl-devel \ + pinentry-curses \ + pkg-config \ + python3 \ + python3-pip \ + python3-setuptools \ + python3-wheel \ + rsync \ + sudo \ + valgrind \ + zlib-devel + +#### +# Install EPEL +#### +RUN mkdir -p /tmp/epel \ + && curl -L -s -S https://download-ib01.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ + -o /tmp/epel/epel-release-latest-8.noarch.rpm \ + && rpm -Uvh /tmp/epel/epel-release-latest-8.noarch.rpm + +RUN dnf --enablerepo=powertools install -y \ + doxygen \ + snappy-devel + +RUN dnf install -y \ + bouncycastle \ + gcc-toolset-9-gcc \ + gcc-toolset-9-gcc-c++ \ + nodejs \ + npm \ + libpmem-devel + +# Set GCC 9 as the default C/C++ compiler +RUN echo "source /opt/rh/gcc-toolset-9/enable" >> /etc/bashrc +SHELL ["/bin/bash", "--login", "-c"] + +#### +# Install Maven 3.6.3 +#### +RUN mkdir -p /opt/maven /tmp/maven \ + && curl -L -s -S https://mirrors.estointernet.in/apache/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \ + -o /tmp/maven/apache-maven-3.6.3-bin.tar.gz \ + && tar xzf /tmp/maven/apache-maven-3.6.3-bin.tar.gz --strip-components 1 -C /opt/maven + +#### +# Install CMake 3.19 +#### +# hadolint ignore=DL3003 +RUN mkdir -p /tmp/cmake /opt/cmake \ + && curl -L -s -S https://cmake.org/files/v3.19/cmake-3.19.0.tar.gz -o /tmp/cmake/cmake-3.19.0.tar.gz \ + && tar xzf /tmp/cmake/cmake-3.19.0.tar.gz --strip-components 1 -C /opt/cmake \ + && cd /opt/cmake || exit && ./bootstrap \ + && make "-j$(nproc)" \ + && make install \ + && cd /root || exit + +#### +# Install zstandard +#### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/zstd /tmp/zstd \ + && curl -L -s -S https://github.com/facebook/zstd/archive/refs/tags/v1.4.9.tar.gz -o /tmp/zstd/v1.4.9.tar.gz \ + && tar xzf /tmp/zstd/v1.4.9.tar.gz --strip-components 1 -C /opt/zstd \ + && cd /opt/zstd || exit \ + && make "-j$(nproc)" \ + && make install \ + && cd /root || exit + +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /opt/maven/apache-maven-3.6.3 +ENV PATH "${PATH}:${MAVEN_HOME}/bin" +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-1.8.0 + +####### +# Install SpotBugs 4.2.2 +####### +RUN mkdir -p /opt/spotbugs \ + && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz \ + && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ + && chmod +x /opt/spotbugs/bin/* +ENV SPOTBUGS_HOME /opt/spotbugs + +####### +# Install Boost 1.72 +####### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/boost-library \ + && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ + && mv boost_1_72_0.tar.bz2 /opt/boost-library \ + && cd /opt/boost-library \ + && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ + && cd /opt/boost-library/boost_1_72_0 \ + && ./bootstrap.sh --prefix=/usr/ \ + && ./b2 --without-python install \ + && cd /root \ + && rm -rf /opt/boost-library + +###### +# Install Google Protobuf 3.7.1 +###### +# hadolint ignore=DL3003 +RUN mkdir -p /opt/protobuf-src \ + && curl -L -s -S \ + https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ + -o 
/opt/protobuf.tar.gz \ + && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ + && cd /opt/protobuf-src \ + && ./configure --prefix=/opt/protobuf \ + && make "-j$(nproc)" \ + && make install \ + && cd /root \ + && rm -rf /opt/protobuf-src +ENV PROTOBUF_HOME /opt/protobuf +ENV PATH "${PATH}:/opt/protobuf/bin" + +#### +# Install pylint and python-dateutil +#### +RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 + +#### +# Install bower +#### +# hadolint ignore=DL3008 +RUN npm install -g bower@1.8.8 From c5f66b1fe74b629e7384b510777f154dc43a5836 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Fri, 14 May 2021 00:15:33 +0530 Subject: [PATCH 318/666] MAPREDUCE-7343. Increase the job name max length in mapred job -list. (#2995). Contributed by Ayush Saxena. --- .../main/java/org/apache/hadoop/mapreduce/tools/CLI.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java index 82e73463c4cc3..530ee91184743 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java @@ -771,10 +771,10 @@ public void displayJobList(JobStatus[] jobs) } @Private - public static String headerPattern = "%23s\t%20s\t%10s\t%14s\t%12s\t%12s" + + public static String headerPattern = "%23s\t%40s\t%10s\t%14s\t%12s\t%12s" + "\t%10s\t%15s\t%15s\t%8s\t%8s\t%10s\t%10s\n"; @Private - public static String dataPattern = "%23s\t%20s\t%10s\t%14d\t%12s\t%12s" + + public static String dataPattern = "%23s\t%40s\t%10s\t%14d\t%12s\t%12s" + "\t%10s\t%15s\t%15s\t%8s\t%8s\t%10s\t%10s\n"; private static String memPattern = "%dM"; private static String UNAVAILABLE = "N/A"; @@ -792,8 +792,8 @@ public void displayJobList(JobStatus[] jobs, PrintWriter writer) { long rsvdMem = job.getReservedMem(); long neededMem = job.getNeededMem(); int jobNameLength = job.getJobName().length(); - writer.printf(dataPattern, job.getJobID().toString(), - job.getJobName().substring(0, jobNameLength > 20 ? 20 : jobNameLength), + writer.printf(dataPattern, job.getJobID().toString(), job.getJobName() + .substring(0, jobNameLength > 40 ? 40 : jobNameLength), job.getState(), job.getStartTime(), job.getUsername(), job.getQueue(), job.getPriority().name(), numUsedSlots < 0 ? UNAVAILABLE : numUsedSlots, From ba60b67151c871f0a28a0f524df463dca72bd77f Mon Sep 17 00:00:00 2001 From: zhuqi Date: Fri, 14 May 2021 13:12:28 +0800 Subject: [PATCH 319/666] YARN-10737: Fix typos in CapacityScheduler#schedule. (#2911) Contributed by Qi Zhu. 
--- .../scheduler/capacity/CapacityScheduler.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 1e81f67df9a7c..ebf4b9d50af93 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -605,10 +605,10 @@ static void schedule(CapacityScheduler cs) throws InterruptedException{ // First randomize the start point int start = random.nextInt(partitionSize); // Allocate containers of partition [start, end) - for (String partititon : partitions) { + for (String partition : partitions) { if (current++ >= start) { CandidateNodeSet candidates = - cs.getCandidateNodeSet(partititon); + cs.getCandidateNodeSet(partition); if (candidates == null) { continue; } @@ -619,12 +619,12 @@ static void schedule(CapacityScheduler cs) throws InterruptedException{ current = 0; // Allocate containers of partition [0, start) - for (String partititon : partitions) { + for (String partition : partitions) { if (current++ > start) { break; } CandidateNodeSet candidates = - cs.getCandidateNodeSet(partititon); + cs.getCandidateNodeSet(partition); if (candidates == null) { continue; } From 63e387cbff699b83c2c268bfcae8370e716b0f02 Mon Sep 17 00:00:00 2001 From: zhuqi Date: Fri, 14 May 2021 13:36:07 +0800 Subject: [PATCH 320/666] YARN-10761: Add more event type to RM Dispatcher event metrics. Contributed by Qi Zhu. --- .../resourcemanager/ResourceManager.java | 38 ++++++++++++++++--- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 9b47431d76dc2..b0dc218f3dd15 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -475,12 +475,40 @@ protected EventHandler createSchedulerEventDispatcher() { protected Dispatcher createDispatcher() { AsyncDispatcher dispatcher = new AsyncDispatcher("RM Event dispatcher"); - GenericEventTypeMetrics genericEventTypeMetrics = + + // Add 4 busy event types. + GenericEventTypeMetrics + nodesListManagerEventTypeMetrics = + GenericEventTypeMetricsManager. + create(dispatcher.getName(), NodesListManagerEventType.class); + dispatcher.addMetrics(nodesListManagerEventTypeMetrics, + nodesListManagerEventTypeMetrics + .getEnumClass()); + + GenericEventTypeMetrics + rmNodeEventTypeMetrics = + GenericEventTypeMetricsManager. 
+ create(dispatcher.getName(), RMNodeEventType.class); + dispatcher.addMetrics(rmNodeEventTypeMetrics, + rmNodeEventTypeMetrics + .getEnumClass()); + + GenericEventTypeMetrics + rmAppEventTypeMetrics = + GenericEventTypeMetricsManager. + create(dispatcher.getName(), RMAppEventType.class); + dispatcher.addMetrics(rmAppEventTypeMetrics, + rmAppEventTypeMetrics + .getEnumClass()); + + GenericEventTypeMetrics + rmAppAttemptEventTypeMetrics = GenericEventTypeMetricsManager. - create(dispatcher.getName(), NodesListManagerEventType.class); - // We can add more - dispatcher.addMetrics(genericEventTypeMetrics, - genericEventTypeMetrics.getEnumClass()); + create(dispatcher.getName(), RMAppAttemptEventType.class); + dispatcher.addMetrics(rmAppAttemptEventTypeMetrics, + rmAppAttemptEventTypeMetrics + .getEnumClass()); + return dispatcher; } From ad9fedfc39e6bf198002cb1d0698ea0798f4c76a Mon Sep 17 00:00:00 2001 From: kyoty Date: Fri, 14 May 2021 23:57:54 +0800 Subject: [PATCH 321/666] YARN-10545. Improve the readability of diagnostics log in yarn-ui2 web page. #2540 Contributed by kyoty. --- .../src/main/webapp/app/templates/yarn-app/info.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs index 4c5da63ed5089..87e581383b895 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs @@ -42,7 +42,7 @@

    Diagnostics
-      {{model.app.diagnostics}}
+      {{model.app.diagnostics}}
    {{else}}
    From c77a7a65bec6afe20e7d71b81c08ca0d734e6928 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Sun, 16 May 2021 00:45:11 +0530 Subject: [PATCH 322/666] HDFS-15988. Stabilise HDFS Pre-Commit. (#2860). Contributed by Ayush Saxena. Signed-off-by: He Xiaoqiao --- dev-support/bin/hadoop.sh | 3 +- dev-support/docker/Dockerfile | 2 +- .../apache/hadoop/hdfs/TestPersistBlocks.java | 3 ++ .../server/datanode/TestDirectoryScanner.java | 52 +++++++++++-------- .../namenode/ha/TestPipelinesFailover.java | 2 +- .../snapshot/TestNestedSnapshots.java | 13 +++-- 6 files changed, 44 insertions(+), 31 deletions(-) diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh index beebea8c97f6b..28d3ad26f2825 100755 --- a/dev-support/bin/hadoop.sh +++ b/dev-support/bin/hadoop.sh @@ -355,6 +355,7 @@ function personality_modules fi ;; unit) + extra="-Dsurefire.rerunFailingTestsCount=2" if [[ "${BUILDMODE}" = full ]]; then ordering=mvnsrc elif [[ "${CHANGED_MODULES[*]}" =~ \. ]]; then @@ -363,7 +364,7 @@ function personality_modules if [[ ${TEST_PARALLEL} = "true" ]] ; then if hadoop_test_parallel; then - extra="-Pparallel-tests" + extra="${extra} -Pparallel-tests" if [[ -n ${TEST_THREADS:-} ]]; then extra="${extra} -DtestsThreadCount=${TEST_THREADS}" fi diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 11f3c6f739b86..51743039e02d2 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -179,7 +179,7 @@ RUN mkdir -p /opt/isa-l-src \ ### # Avoid out of memory errors in builds ### -ENV MAVEN_OPTS -Xms256m -Xmx1536m +ENV MAVEN_OPTS -Xms256m -Xmx3072m # Skip gpg verification when downloading Yetus via yetus-wrapper ENV HADOOP_SKIP_YETUS_VERIFICATION true diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java index c498160950ead..3658d75107abd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -97,12 +98,14 @@ void testRestartDfs(boolean useFlush) throws Exception { conf.setInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0); + conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true); MiniDFSCluster cluster = null; long len = 0; FSDataOutputStream stream; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); // Creating a file with 4096 blockSize to write multiple blocks stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index e2a15a8da039a..7f79778842780 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -91,7 +91,6 @@ public class TestDirectoryScanner { private static final Logger LOG = LoggerFactory.getLogger(TestDirectoryScanner.class); - private static final Configuration CONF = new HdfsConfiguration(); private static final int DEFAULT_GEN_STAMP = 9999; private MiniDFSCluster cluster; @@ -103,12 +102,14 @@ public class TestDirectoryScanner { private final Random r = new Random(); private static final int BLOCK_LENGTH = 100; - static { - CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_LENGTH); - CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1); - CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + public Configuration getConfiguration() { + Configuration configuration = new HdfsConfiguration(); + configuration.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_LENGTH); + configuration.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1); + configuration.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); + configuration.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, getMemlockLimit(Long.MAX_VALUE)); + return configuration; } @Before @@ -361,7 +362,8 @@ private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile, @Test(timeout = 300000) public void testRetainBlockOnPersistentStorage() throws Exception { - cluster = new MiniDFSCluster.Builder(CONF) + Configuration conf = getConfiguration(); + cluster = new MiniDFSCluster.Builder(conf) .storageTypes( new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT }) .numDataNodes(1).build(); @@ -370,7 +372,7 @@ public void testRetainBlockOnPersistentStorage() throws Exception { bpid = cluster.getNamesystem().getBlockPoolId(); fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0)); client = cluster.getFileSystem().getClient(); - scanner = new DirectoryScanner(fds, CONF); + scanner = new DirectoryScanner(fds, conf); scanner.setRetainDiffs(true); FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0)); @@ -413,8 +415,9 @@ public void testScanDirectoryStructureWarn() throws Exception { new WriterAppender(new SimpleLayout(), loggerStream); rootLogger.addAppender(writerAppender); + Configuration conf = getConfiguration(); cluster = new MiniDFSCluster - .Builder(CONF) + .Builder(conf) .storageTypes(new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT }) .numDataNodes(1) @@ -424,7 +427,7 @@ public void testScanDirectoryStructureWarn() throws Exception { bpid = cluster.getNamesystem().getBlockPoolId(); fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0)); client = cluster.getFileSystem().getClient(); - scanner = new DirectoryScanner(fds, CONF); + scanner = new DirectoryScanner(fds, conf); scanner.setRetainDiffs(true); FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0)); @@ -464,7 +467,8 @@ public void testScanDirectoryStructureWarn() throws Exception { @Test(timeout = 300000) public void testDeleteBlockOnTransientStorage() throws Exception { - cluster = new MiniDFSCluster.Builder(CONF) + Configuration conf = getConfiguration(); + cluster = new MiniDFSCluster.Builder(conf) .storageTypes( new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT }) .numDataNodes(1).build(); @@ -473,7 +477,7 @@ public void testDeleteBlockOnTransientStorage() throws Exception { bpid = cluster.getNamesystem().getBlockPoolId(); fds = 
DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0)); client = cluster.getFileSystem().getClient(); - scanner = new DirectoryScanner(fds, CONF); + scanner = new DirectoryScanner(fds, conf); scanner.setRetainDiffs(true); FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0)); @@ -512,16 +516,17 @@ public void testDirectoryScanner() throws Exception { } public void runTest(int parallelism) throws Exception { - cluster = new MiniDFSCluster.Builder(CONF).build(); + Configuration conf = getConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); bpid = cluster.getNamesystem().getBlockPoolId(); fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0)); client = cluster.getFileSystem().getClient(); - CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, + conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, parallelism); - scanner = new DirectoryScanner(fds, CONF); + scanner = new DirectoryScanner(fds, conf); scanner.setRetainDiffs(true); // Add files with 100 blocks @@ -672,9 +677,9 @@ public void runTest(int parallelism) throws Exception { * * @throws Exception thrown on unexpected failure */ - @Test(timeout = 600000) + @Test public void testThrottling() throws Exception { - Configuration conf = new Configuration(CONF); + Configuration conf = new Configuration(getConfiguration()); // We need lots of blocks so the report compiler threads have enough to // keep them busy while we watch them. @@ -714,7 +719,7 @@ public void testThrottling() throws Exception { // Waiting should be about 9x running. LOG.info("RATIO: " + ratio); assertTrue("Throttle is too restrictive", ratio <= 10f); - assertTrue("Throttle is too permissive", ratio >= 7f); + assertTrue("Throttle is too permissive" + ratio, ratio >= 7f); // Test with a different limit conf.setInt( @@ -754,7 +759,7 @@ public void testThrottling() throws Exception { assertTrue("Throttle is too permissive", ratio >= 7f); // Test with no limit - scanner = new DirectoryScanner(fds, CONF); + scanner = new DirectoryScanner(fds, getConfiguration()); scanner.setRetainDiffs(true); scan(blocks, 0, 0, 0, 0, 0); scanner.shutdown(); @@ -1095,13 +1100,14 @@ public void TestScanInfo() throws Exception { */ @Test(timeout = 60000) public void testExceptionHandlingWhileDirectoryScan() throws Exception { - cluster = new MiniDFSCluster.Builder(CONF).build(); + Configuration conf = getConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); bpid = cluster.getNamesystem().getBlockPoolId(); fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0)); client = cluster.getFileSystem().getClient(); - CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1); // Add files with 2 blocks createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 2, false); @@ -1121,7 +1127,7 @@ public void testExceptionHandlingWhileDirectoryScan() throws Exception { FsDatasetSpi spyFds = Mockito.spy(fds); Mockito.doReturn(volReferences).when(spyFds).getFsVolumeReferences(); - scanner = new DirectoryScanner(spyFds, CONF); + scanner = new DirectoryScanner(spyFds, conf); scanner.setRetainDiffs(true); scanner.reconcile(); } finally { @@ -1135,7 +1141,7 @@ public void testExceptionHandlingWhileDirectoryScan() throws Exception { @Test public void testDirectoryScannerInFederatedCluster() throws Exception { - HdfsConfiguration conf = new HdfsConfiguration(CONF); + 
HdfsConfiguration conf = new HdfsConfiguration(getConfiguration()); // Create Federated cluster with two nameservices and one DN try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java index 292f7abe6c2d6..260dd7049d5cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java @@ -324,7 +324,7 @@ public void testLeaseRecoveryAfterFailover() throws Exception { * DN running the recovery should then fail to commit the synchronization * and a later retry will succeed. */ - @Test(timeout=30000) + @Test(timeout=60000) public void testFailoverRightBeforeCommitSynchronization() throws Exception { final Configuration conf = new Configuration(); // Disable permissions so that another user can recover the lease. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java index ed570492f7783..67567275e548b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import static org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.SNAPSHOT_QUOTA_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -61,6 +61,7 @@ public class TestNestedSnapshots { private static final short REPLICATION = 3; private static final long BLOCKSIZE = 1024; + private static final int SNAPSHOTLIMIT = 100; private static final Configuration conf = new Configuration(); private static MiniDFSCluster cluster; @@ -68,6 +69,7 @@ public class TestNestedSnapshots { @Before public void setUp() throws Exception { + conf.setInt(DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, SNAPSHOTLIMIT); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION) .build(); cluster.waitActive(); @@ -199,7 +201,7 @@ private static void assertFile(Path s1, Path s2, Path file, * Test the snapshot limit of a single snapshottable directory. 
* @throws Exception */ - @Test (timeout=300000) + @Test (timeout=600000) public void testSnapshotLimit() throws Exception { final int step = 1000; final String dirStr = "/testSnapshotLimit/dir"; @@ -208,7 +210,8 @@ public void testSnapshotLimit() throws Exception { hdfs.allowSnapshot(dir); int s = 0; - for(; s < SNAPSHOT_QUOTA_DEFAULT; s++) { + for(; s < SNAPSHOTLIMIT; s++) { + SnapshotTestHelper.LOG.info("Creating snapshot number: {}", s); final String snapshotName = "s" + s; hdfs.createSnapshot(dir, snapshotName); @@ -226,10 +229,10 @@ public void testSnapshotLimit() throws Exception { SnapshotTestHelper.LOG.info("The exception is expected.", ioe); } - for(int f = 0; f < SNAPSHOT_QUOTA_DEFAULT; f += step) { + for(int f = 0; f < SNAPSHOTLIMIT; f += step) { final String file = "f" + f; s = RANDOM.nextInt(step); - for(; s < SNAPSHOT_QUOTA_DEFAULT; s += RANDOM.nextInt(step)) { + for(; s < SNAPSHOTLIMIT; s += RANDOM.nextInt(step)) { final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file); //the file #f exists in snapshot #s iff s > f. Assert.assertEquals(s > f, hdfs.exists(p)); From 0f81c4914478b8cec81bac5cc72ff288f74a65a8 Mon Sep 17 00:00:00 2001 From: lujiefsi Date: Sun, 16 May 2021 23:44:38 -0500 Subject: [PATCH 323/666] YARN-10555. Missing access check before getAppAttempts (#2608) Co-authored-by: lujie Signed-off-by: Akira Ajisaka --- .../yarn/server/resourcemanager/webapp/RMAppBlock.java | 2 +- .../yarn/server/resourcemanager/webapp/RMWebServices.java | 2 +- .../server/resourcemanager/webapp/dao/AppAttemptInfo.java | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java index d2604009f0e4a..838847a362a53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java @@ -142,7 +142,7 @@ protected void generateApplicationTable(Block html, continue; } AppAttemptInfo attemptInfo = - new AppAttemptInfo(this.rm, rmAppAttempt, rmApp.getUser(), + new AppAttemptInfo(this.rm, rmAppAttempt, true, rmApp.getUser(), WebAppUtils.getHttpSchemePrefix(conf)); Set nodes = rmAppAttempt.getBlacklistedNodes(); // nodes which are blacklisted by the application diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 2a725cf20c0bd..d5958f5bf3018 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -1127,7 +1127,7 @@ public AppAttemptsInfo 
getAppAttempts(@Context HttpServletRequest hsr, AppAttemptsInfo appAttemptsInfo = new AppAttemptsInfo(); for (RMAppAttempt attempt : app.getAppAttempts().values()) { AppAttemptInfo attemptInfo = new AppAttemptInfo(rm, attempt, - app.getUser(), hsr.getScheme() + "://"); + hasAccess(app, hsr), app.getUser(), hsr.getScheme() + "://"); appAttemptsInfo.add(attemptInfo); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java index 66381f4e7eeef..00454afe0e5f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java @@ -51,8 +51,8 @@ public class AppAttemptInfo { public AppAttemptInfo() { } - public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, String user, - String schemePrefix) { + public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, + Boolean hasAccess, String user, String schemePrefix) { this.startTime = 0; this.containerId = ""; this.nodeHttpAddress = ""; @@ -65,7 +65,7 @@ public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, String user, this.startTime = attempt.getStartTime(); this.finishedTime = attempt.getFinishTime(); Container masterContainer = attempt.getMasterContainer(); - if (masterContainer != null) { + if (masterContainer != null && hasAccess) { this.containerId = masterContainer.getId().toString(); this.nodeHttpAddress = masterContainer.getNodeHttpAddress(); this.nodeId = masterContainer.getNodeId().toString(); From 3ee42c52bc74e9b21b9dfa49ac6c52c8cec7db5a Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Mon, 17 May 2021 13:30:12 +0200 Subject: [PATCH 324/666] YARN-10763. Add the number of containers assigned per second metrics to ClusterMetrics. Contributed by chaosju. 
--- .../resourcemanager/ClusterMetrics.java | 37 +++++++++++++++++++ .../scheduler/AppSchedulingInfo.java | 2 + .../webapp/dao/ClusterMetricsInfo.java | 8 ++++ .../resourcemanager/TestClusterMetrics.java | 17 +++++++++ .../webapp/TestRMWebServices.java | 2 +- 5 files changed, 65 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java index a02eeef28b7ca..67a3a620875d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java @@ -21,8 +21,12 @@ import static org.apache.hadoop.metrics2.lib.Interns.info; import java.util.Map; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsSystem; @@ -66,6 +70,8 @@ public class ClusterMetrics { rmEventProcCPUAvg; @Metric("RM Event Processor CPU Usage 60 second Max") MutableGaugeLong rmEventProcCPUMax; + @Metric("# of Containers assigned in the last second") MutableGaugeInt + containerAssignedPerSecond; private boolean rmEventProcMonitorEnable = false; @@ -85,6 +91,22 @@ public class ClusterMetrics { private static volatile ClusterMetrics INSTANCE = null; private static MetricsRegistry registry; + private AtomicInteger numContainersAssigned = new AtomicInteger(0); + private ScheduledThreadPoolExecutor assignCounterExecutor; + + ClusterMetrics() { + assignCounterExecutor = new ScheduledThreadPoolExecutor(1, + new ThreadFactoryBuilder(). + setDaemon(true).setNameFormat("ContainerAssignmentCounterThread"). 
+ build()); + assignCounterExecutor.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + containerAssignedPerSecond.set(numContainersAssigned.getAndSet(0)); + } + }, 1, 1, TimeUnit.SECONDS); + } + public static ClusterMetrics getMetrics() { if(!isInitialized.get()){ synchronized (ClusterMetrics.class) { @@ -120,6 +142,9 @@ private static void registerMetrics() { @VisibleForTesting public synchronized static void destroy() { + if (INSTANCE != null && INSTANCE.getAssignCounterExecutor() != null) { + INSTANCE.getAssignCounterExecutor().shutdownNow(); + } isInitialized.set(false); INSTANCE = null; } @@ -319,4 +344,16 @@ public long getUtilizedVirtualCores() { public void incrUtilizedVirtualCores(long delta) { utilizedVirtualCores.incr(delta); } + + public int getContainerAssignedPerSecond() { + return containerAssignedPerSecond.value(); + } + + public void incrNumContainerAssigned() { + numContainersAssigned.incrementAndGet(); + } + + private ScheduledThreadPoolExecutor getAssignCounterExecutor(){ + return assignCounterExecutor; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index a3b235bc3c97b..2b2f8324655be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -33,6 +33,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; +import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -773,6 +774,7 @@ public static void updateMetrics(ApplicationId applicationId, NodeType type, containerAllocated.getContainer().getResource()); } queue.getMetrics().incrNodeTypeAggregations(user, type); + ClusterMetrics.getMetrics().incrNumContainerAssigned(); } // Get AppPlacementAllocator by specified schedulerKey diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index 779d233042637..7dc2d8ac1ebbf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -67,6 +67,8 @@ public class ClusterMetricsInfo { private int activeNodes; private int shutdownNodes; + private int containerAssignedPerSecond; + // Total used resource of the cluster, including all partitions private ResourceInfo totalUsedResourcesAcrossPartition; @@ -158,6 
+160,8 @@ public ClusterMetricsInfo(final ResourceScheduler rs) { this.shutdownNodes = clusterMetrics.getNumShutdownNMs(); this.totalNodes = activeNodes + lostNodes + decommissionedNodes + rebootedNodes + unhealthyNodes + decommissioningNodes + shutdownNodes; + this.containerAssignedPerSecond = clusterMetrics + .getContainerAssignedPerSecond(); } public int getAppsSubmitted() { @@ -411,4 +415,8 @@ public int getTotalAllocatedContainersAcrossPartition() { public boolean getCrossPartitionMetricsAvailable() { return crossPartitionMetricsAvailable; } + + public int getContainerAssignedPerSecond() { + return this.containerAssignedPerSecond; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClusterMetrics.java index d81e27860ec2c..645a2f1c40e59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClusterMetrics.java @@ -20,11 +20,14 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Assert; import org.junit.Test; +import java.util.function.Supplier; + public class TestClusterMetrics { private ClusterMetrics metrics; @@ -63,4 +66,18 @@ public void tearDown() { DefaultMetricsSystem.shutdown(); } } + + @Test + public void testClusterMetrics() throws Exception { + Assert.assertTrue(!metrics.containerAssignedPerSecond.changed()); + metrics.incrNumContainerAssigned(); + metrics.incrNumContainerAssigned(); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return metrics.getContainerAssignedPerSecond() == 2; + } + }, 500, 5000); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index 02094327f82bc..b651c7959a42f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -474,7 +474,7 @@ public void verifyClusterMetricsJSON(JSONObject json) throws JSONException, Exception { assertEquals("incorrect number of elements", 1, json.length()); JSONObject clusterinfo = json.getJSONObject("clusterMetrics"); - assertEquals("incorrect number of elements", 32, clusterinfo.length()); + assertEquals("incorrect number of elements", 33, clusterinfo.length()); verifyClusterMetrics( clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"), clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"), 
From f55a214025d27dcc0d1e02d765e13418bf496e67 Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Mon, 17 May 2021 21:07:29 +0200 Subject: [PATCH 325/666] YARN-10258. Add metrics for 'ApplicationsRunning' in NodeManager. Contributed by ANANDA G B. --- .../containermanager/ContainerManagerImpl.java | 2 ++ .../application/ApplicationImpl.java | 3 +++ .../nodemanager/metrics/NodeManagerMetrics.java | 10 ++++++++++ .../TestContainerManagerRecovery.java | 6 +++--- .../metrics/TestNodeManagerMetrics.java | 14 +++++++++----- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 5eb36ba558ea0..ee1a5bf28522e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -442,6 +442,7 @@ private void recoverApplication(ContainerManagerApplicationProto p) ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc, appId, creds, context, p.getAppLogAggregationInitedTime()); context.getApplications().put(appId, app); + metrics.runningApplication(); app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext)); } @@ -1137,6 +1138,7 @@ protected void startContainerInternal( applicationID, credentials, context); if (context.getApplications().putIfAbsent(applicationID, application) == null) { + metrics.runningApplication(); LOG.info("Creating a new application reference for app " + applicationID); LogAggregationContext logAggregationContext = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java index 8fe9651045c49..d42097430cc5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java @@ -623,6 +623,9 @@ static class AppLogsAggregatedTransition implements public void transition(ApplicationImpl app, ApplicationEvent event) { ApplicationId appId = event.getApplicationID(); app.context.getApplications().remove(appId); + if (null != app.context.getNodeManagerMetrics()) { + app.context.getNodeManagerMetrics().endRunningApplication(); + } app.aclsManager.removeApplication(appId); try { app.context.getNMStateStore().removeApplication(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java index 848b9445289a0..a469f653ebc3c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java @@ -100,6 +100,8 @@ public class NodeManagerMetrics { MutableGaugeFloat nodeCpuUtilization; @Metric("Current GPU utilization") MutableGaugeFloat nodeGpuUtilization; + @Metric("Current running apps") + MutableGaugeInt applicationsRunning; @Metric("Missed localization requests in bytes") MutableCounterLong localizedCacheMissBytes; @@ -187,6 +189,14 @@ public void endReInitingContainer() { containersReIniting.decr(); } + public void runningApplication() { + applicationsRunning.incr(); + } + + public void endRunningApplication() { + applicationsRunning.decr(); + } + public void pausedContainer() { containersPaused.incr(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index c67ae86f95ef2..de29efd59fefb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -438,7 +438,7 @@ public void testNodeManagerMetricsRecovery() throws Exception { org.apache.hadoop.yarn.server.nodemanager .containermanager.container.ContainerState.RUNNING); TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, - 1, 1, 1, 9, 1, 7, 0F); + 1, 1, 1, 9, 1, 7, 0F, 1); // restart and verify metrics could be recovered cm.stop(); @@ -446,7 +446,7 @@ public void testNodeManagerMetricsRecovery() throws Exception { metrics = NodeManagerMetrics.create(); metrics.addResource(Resource.newInstance(10240, 8)); TestNodeManagerMetrics.checkMetrics(0, 0, 0, 0, 0, 0, - 0, 0, 10, 0, 8, 0F); + 0, 0, 10, 0, 8, 0F, 0); context = createContext(conf, stateStore); cm = createContainerManager(context, delSrvc); cm.init(conf); @@ -455,7 +455,7 @@ public void testNodeManagerMetricsRecovery() throws Exception { app = context.getApplications().get(appId); assertNotNull(app); TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, - 1, 1, 1, 9, 1, 7, 0F); + 1, 1, 1, 9, 1, 7, 0F, 1); cm.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java index 37454747c9219..33a3ae12f109e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java @@ -103,12 +103,16 @@ public void testReferenceOfSingletonJvmMetrics() { // Set node gpu utilization metrics.setNodeGpuUtilization(35.5F); + // ApplicationsRunning expected to be 1 + metrics.runningApplication(); + metrics.runningApplication(); + metrics.endRunningApplication(); + // availableGB is expected to be floored, // while allocatedGB is expected to be ceiled. // allocatedGB: 3.75GB allocated memory is shown as 4GB // availableGB: 4.25GB available memory is shown as 4GB - checkMetrics(10, 1, 1, 1, 1, - 1, 4, 7, 4, 13, 3, 35.5F); + checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 13, 3, 35.5F, 1); // Update resource and check available resource again metrics.addResource(total); @@ -120,7 +124,7 @@ public void testReferenceOfSingletonJvmMetrics() { public static void checkMetrics(int launched, int completed, int failed, int killed, int initing, int running, int allocatedGB, int allocatedContainers, int availableGB, int allocatedVCores, - int availableVCores, Float nodeGpuUtilization) { + int availableVCores, Float nodeGpuUtilization, int applicationsRunning) { MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics"); assertCounter("ContainersLaunched", launched, rb); assertCounter("ContainersCompleted", completed, rb); @@ -132,8 +136,8 @@ public static void checkMetrics(int launched, int completed, int failed, assertGauge("AllocatedVCores", allocatedVCores, rb); assertGauge("AllocatedContainers", allocatedContainers, rb); assertGauge("AvailableGB", availableGB, rb); - assertGauge("AvailableVCores",availableVCores, rb); + assertGauge("AvailableVCores", availableVCores, rb); assertGauge("NodeGpuUtilization", nodeGpuUtilization, rb); - + assertGauge("ApplicationsRunning", applicationsRunning, rb); } } From 6784522e04b91b084ad2c905ba3b0e842534387c Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 17 May 2021 20:21:54 -0700 Subject: [PATCH 326/666] HADOOP-17703. checkcompatibility.py errors out when specifying annotations. (#3017) Reviewed-by: Akira Ajisaka --- dev-support/bin/checkcompatibility.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/bin/checkcompatibility.py b/dev-support/bin/checkcompatibility.py index 3db36154ef9c5..e8c0e26a712db 100755 --- a/dev-support/bin/checkcompatibility.py +++ b/dev-support/bin/checkcompatibility.py @@ -175,7 +175,7 @@ def run_java_acc(src_name, src_jars, dst_name, dst_jars, annotations): if annotations is not None: annotations_path = os.path.join(get_scratch_dir(), "annotations.txt") - with file(annotations_path, "w") as f: + with open(annotations_path, "w") as f: for ann in annotations: print(ann, file=f) args += ["-annotations-list", annotations_path] From 18ae1b6d695b84f12e104d86830b44da575b57af Mon Sep 17 00:00:00 2001 From: Xiaoyu Yao Date: Tue, 18 May 2021 10:11:36 -0700 Subject: [PATCH 327/666] HADOOP-17699. Remove hardcoded SunX509 usage from SSLFactory. 
(#3016) --- .../security/ssl/FileBasedKeyStoresFactory.java | 4 ++-- .../ssl/ReloadingX509KeystoreManager.java | 2 +- .../security/ssl/ReloadingX509TrustManager.java | 4 ++-- .../apache/hadoop/security/ssl/SSLFactory.java | 12 ++++++++++-- .../hadoop/security/ssl/TestSSLFactory.java | 16 ++++++++++++++++ 5 files changed, 31 insertions(+), 7 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index 236d881581dac..ddaa972c07606 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -273,8 +273,8 @@ public void init(SSLFactory.Mode mode) } else { KeyStore keystore = KeyStore.getInstance(keystoreType); keystore.load(null, null); - KeyManagerFactory keyMgrFactory = KeyManagerFactory - .getInstance(SSLFactory.SSLCERTIFICATE); + KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( + SSLFactory.KEY_MANAGER_SSLCERTIFICATE); keyMgrFactory.init(keystore, null); keyManagers = keyMgrFactory.getKeyManagers(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java index 72e8b6b63b420..216d949de1048 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509KeystoreManager.java @@ -143,7 +143,7 @@ private X509ExtendedKeyManager loadKeyManager(Path path) LOG.debug(" Loaded KeyStore: " + path.toFile().getAbsolutePath()); KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance( - SSLFactory.SSLCERTIFICATE); + SSLFactory.KEY_MANAGER_SSLCERTIFICATE); keyMgrFactory.init(keystore, (keyPassword != null) ? 
keyPassword.toCharArray() : null); for (KeyManager candidate: keyMgrFactory.getKeyManagers()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index 68fd4c161005c..5ae966f2c50fa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -136,8 +136,8 @@ X509TrustManager loadTrustManager(Path path) in.close(); } - TrustManagerFactory trustManagerFactory = - TrustManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE); + TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance( + SSLFactory.TRUST_MANAGER_SSLCERTIFICATE); trustManagerFactory.init(ks); TrustManager[] trustManagers = trustManagerFactory.getTrustManagers(); for (TrustManager trustManager1 : trustManagers) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java index d168a317dfd7e..fe3233d848d4f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java @@ -25,14 +25,16 @@ import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.util.PlatformName.IBM_JAVA; +import static org.apache.hadoop.util.PlatformName.JAVA_VENDOR_NAME; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLServerSocketFactory; import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManagerFactory; import java.io.IOException; import java.net.HttpURLConnection; import java.security.GeneralSecurityException; @@ -99,7 +101,13 @@ public enum Mode { CLIENT, SERVER } public static final String SSL_SERVER_EXCLUDE_CIPHER_LIST = "ssl.server.exclude.cipher.list"; - public static final String SSLCERTIFICATE = IBM_JAVA?"ibmX509":"SunX509"; + public static final String KEY_MANAGER_SSLCERTIFICATE = + JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" : + KeyManagerFactory.getDefaultAlgorithm(); + + public static final String TRUST_MANAGER_SSLCERTIFICATE = + JAVA_VENDOR_NAME.contains("IBM") ? 
"ibmX509" : + TrustManagerFactory.getDefaultAlgorithm(); public static final String KEYSTORES_FACTORY_CLASS_KEY = "hadoop.ssl.keystores.factory.class"; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java index 4e5a6fbd7e0f9..ece6a05ef5878 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.security.ssl; +import static java.security.Security.getProperty; +import static java.security.Security.setProperty; import static org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY; import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.TRUST_STORE_PASSWORD_DEFAULT; import static org.apache.hadoop.security.ssl.SSLFactory.Mode.CLIENT; @@ -367,6 +369,20 @@ public void invalidHostnameVerifier() throws Exception { } } + @Test + public void testDifferentAlgorithm() throws Exception { + Configuration conf = createConfiguration(false, true); + String currAlg = getProperty("ssl.KeyManagerFactory.algorithm"); + setProperty("ssl.KeyManagerFactory.algorithm", "PKIX"); + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); + try { + sslFactory.init(); + } finally { + sslFactory.destroy(); + setProperty("ssl.KeyManagerFactory.algorithm", currAlg); + } + } + @Test public void testConnectionConfigurator() throws Exception { Configuration conf = createConfiguration(false, true); From e0c2349a9673a8e1bf8b2c7c0144b8b04a784b21 Mon Sep 17 00:00:00 2001 From: huhaiyang Date: Wed, 19 May 2021 09:04:41 +0800 Subject: [PATCH 328/666] HDFS-15877. BlockReconstructionWork should resetTargets() before BlockManager#validateReconstructionWork return false (#2747) --- .../server/blockmanagement/BlockManager.java | 4 +++- .../blockmanagement/TestBlockManager.java | 22 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 247de6b411885..cc7b93fc4e4a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2246,7 +2246,8 @@ private void adjustSrcNodesAndIndices(BlockInfoStriped block, } } - private boolean validateReconstructionWork(BlockReconstructionWork rw) { + @VisibleForTesting + boolean validateReconstructionWork(BlockReconstructionWork rw) { BlockInfo block = rw.getBlock(); int priority = rw.getPriority(); // Recheck since global lock was released @@ -2281,6 +2282,7 @@ private boolean validateReconstructionWork(BlockReconstructionWork rw) { placementStatus.getAdditionalReplicasRequired())) { // If the new targets do not meet the placement policy, or at least // reduce the number of replicas needed, then no use continuing. 
+ rw.resetTargets(); return false; } // mark that the reconstruction work is to replicate internal block to a diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 3b46702f4ed29..59709423f9f62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -1949,4 +1949,26 @@ public void testLegacyBlockInInvalidateBlocks() { assertEquals(0, ibs.getBlocks()); assertEquals(0, ibs.getECBlocks()); } + + @Test + public void testValidateReconstructionWorkAndRacksNotEnough() { + addNodes(nodes); + // Originally on only nodes in rack A. + List origNodes = rackA; + BlockInfo blockInfo = addBlockOnNodes(0, origNodes); + BlockPlacementStatus status = bm.getBlockPlacementStatus(blockInfo); + // Block has enough copies, but not enough racks. + assertFalse(status.isPlacementPolicySatisfied()); + DatanodeStorageInfo newNode = DFSTestUtil.createDatanodeStorageInfo( + "storage8", "8.8.8.8", "/rackA", "host8"); + BlockReconstructionWork work = bm.scheduleReconstruction(blockInfo, 3); + assertNotNull(work); + assertEquals(1, work.getAdditionalReplRequired()); + // the new targets in rack A. + work.setTargets(new DatanodeStorageInfo[]{newNode}); + // the new targets do not meet the placement policy return false. + assertFalse(bm.validateReconstructionWork(work)); + // validateReconstructionWork return false, need to perform resetTargets(). + assertNull(work.getTargets()); + } } From 1ae11928613fbe9c3837b355dc9df2a3c1d6ebca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 May 2021 20:14:18 -0700 Subject: [PATCH 329/666] Bump node-sass (#3004) Bumps [node-sass](https://github.com/sass/node-sass) from 4.13.0 to 4.14.1. 
- [Release notes](https://github.com/sass/node-sass/releases) - [Changelog](https://github.com/sass/node-sass/blob/master/CHANGELOG.md) - [Commits](https://github.com/sass/node-sass/compare/v4.13.0...v4.14.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Reviewed-by: Gergely Pollak --- .../hadoop-yarn-ui/src/main/webapp/yarn.lock | 448 ++++++++++++------ 1 file changed, 301 insertions(+), 147 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index 8f5a4f6e7c4d7..16594389bd58e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -43,12 +43,12 @@ ajv@^4.9.1: co "^4.6.0" json-stable-stringify "^1.0.1" -ajv@^6.5.5: - version "6.10.2" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.10.2.tgz#d3cea04d6b017b2894ad69040fec8b623eb4bd52" - integrity sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw== +ajv@^6.12.3: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== dependencies: - fast-deep-equal "^2.0.1" + fast-deep-equal "^3.1.1" fast-json-stable-stringify "^2.0.0" json-schema-traverse "^0.4.1" uri-js "^4.2.2" @@ -99,6 +99,11 @@ ansi-regex@^3.0.0: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= +ansi-regex@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" + integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== + ansi-styles@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de" @@ -109,7 +114,7 @@ ansi-styles@^2.0.1, ansi-styles@^2.1.0, ansi-styles@^2.2.1: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= -ansi-styles@^3.2.1: +ansi-styles@^3.2.0, ansi-styles@^3.2.1: version "3.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== @@ -354,11 +359,16 @@ aws-sign2@~0.7.0: resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= -aws4@^1.2.1, aws4@^1.8.0: +aws4@^1.2.1: version "1.9.0" resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.9.0.tgz#24390e6ad61386b0a747265754d2a17219de862c" integrity sha512-Uvq6hVe90D0B2WEnUqtdgY1bATGz3mw33nH9Y+dmA+w5DHvUmBgkr5rM/KCHpCsiFNRUfokW/szpPPgMK2hm4A== +aws4@^1.8.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59" + integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== + babel-core@^5.0.0: version "5.8.38" resolved 
"https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558" @@ -513,9 +523,9 @@ backo2@1.0.2: integrity sha1-MasayLEpNjRj41s+u2n038+6eUc= balanced-match@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== base64-arraybuffer@0.1.2: version "0.1.2" @@ -1198,10 +1208,10 @@ camelcase@^2.0.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f" integrity sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8= -camelcase@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a" - integrity sha1-MvxLn82vhF/N9+c7uXysImHwqwo= +camelcase@^5.0.0: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== can-symlink@^1.0.0: version "1.0.0" @@ -1360,14 +1370,14 @@ cliui@^2.1.0: right-align "^0.1.1" wordwrap "0.0.2" -cliui@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d" - integrity sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0= +cliui@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" + integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== dependencies: - string-width "^1.0.1" - strip-ansi "^3.0.1" - wrap-ansi "^2.0.0" + string-width "^3.1.0" + strip-ansi "^5.2.0" + wrap-ansi "^5.1.0" clone@^0.2.0: version "0.2.0" @@ -1760,7 +1770,7 @@ debuglog@^1.0.1: resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492" integrity sha1-qiT/uaw9+aI1GDfPstJ5NgzXhJI= -decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2: +decamelize@^1.0.0, decamelize@^1.1.2, decamelize@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= @@ -2369,6 +2379,11 @@ ember-wormhole@^0.3.4: dependencies: ember-cli-babel "^5.0.0" +emoji-regex@^7.0.1: + version "7.0.3" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== + encodeurl@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" @@ -2679,10 +2694,10 @@ extsprintf@^1.2.0: resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= -fast-deep-equal@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49" - integrity sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk= +fast-deep-equal@^3.1.1: + version "3.1.3" + resolved 
"https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== fast-json-stable-stringify@^2.0.0: version "2.1.0" @@ -2774,6 +2789,13 @@ find-up@^1.0.0: path-exists "^2.0.0" pinkie-promise "^2.0.0" +find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + findup-sync@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c" @@ -3002,6 +3024,11 @@ fstream@^1.0.0, fstream@^1.0.12, fstream@^1.0.2, fstream@~1.0.8: mkdirp ">=0.5 0" rimraf "2" +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + gauge@~1.2.0, gauge@~1.2.5: version "1.2.7" resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93" @@ -3048,10 +3075,10 @@ generate-object-property@^1.1.0: dependencies: is-property "^1.0.0" -get-caller-file@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" - integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== +get-caller-file@^2.0.1: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== get-stdin@^4.0.1: version "4.0.1" @@ -3153,9 +3180,9 @@ glob@^6.0.1: path-is-absolute "^1.0.0" glob@^7.0.0, glob@^7.0.3, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@~7.1.1: - version "7.1.6" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" - integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== + version "7.1.7" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.7.tgz#3b193e9233f01d42d0b3f78294bbeeb418f94a90" + integrity sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ== dependencies: fs.realpath "^1.0.0" inflight "^1.0.4" @@ -3180,9 +3207,9 @@ globals@^6.4.0: integrity sha1-hJgDKzttHMge68X3lpDY/in6v08= globule@^1.0.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/globule/-/globule-1.3.0.tgz#41d0e9fb44afd4b80d93a23263714f90b3dec904" - integrity sha512-YlD4kdMqRCQHrhVdonet4TdRtv1/sZKepvoxNT4Nrhrp5HI8XFfc8kFlGlBn2myBo80aGp8Eft259mbcUJhgSg== + version "1.3.2" + resolved "https://registry.yarnpkg.com/globule/-/globule-1.3.2.tgz#d8bdd9e9e4eef8f96e245999a5dee7eb5d8529c4" + integrity sha512-7IDTQTIu2xzXkT+6mlluidnWo+BypnbSoEVVQCGfzqnl5Ik8d3e1d4wycb8Rj9tWW+Z39uPWsdlquqiqPCd/pA== dependencies: glob "~7.1.1" lodash "~4.17.10" @@ -3195,11 +3222,16 @@ graceful-fs@^3.0.5: dependencies: natives "^1.1.3" -graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9: +graceful-fs@^4.1.11, graceful-fs@^4.1.4, graceful-fs@^4.1.6, 
graceful-fs@^4.1.9: version "4.2.3" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.3.tgz#4a12ff1b60376ef09862c2093edd908328be8423" integrity sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ== +graceful-fs@^4.1.2: + version "4.2.6" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee" + integrity sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ== + graceful-fs@~2.0.0: version "2.0.3" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0" @@ -3253,12 +3285,12 @@ har-validator@~4.2.1: ajv "^4.9.1" har-schema "^1.0.5" -har-validator@~5.1.0: - version "5.1.3" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" - integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g== +har-validator@~5.1.3: + version "5.1.5" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.5.tgz#1f0803b9f8cb20c0fa13822df1ecddb36bde1efd" + integrity sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w== dependencies: - ajv "^6.5.5" + ajv "^6.12.3" har-schema "^2.0.0" has-ansi@^0.1.0: @@ -3309,6 +3341,13 @@ has-unicode@^2.0.0: resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk= +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + hash-for-dep@^1.0.2: version "1.5.1" resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.5.1.tgz#497754b39bee2f1c4ade4521bfd2af0a7c1196e3" @@ -3367,7 +3406,12 @@ home-or-tmp@^1.0.0: os-tmpdir "^1.0.1" user-home "^1.1.1" -hosted-git-info@^2.1.4, hosted-git-info@^2.1.5, hosted-git-info@^2.4.2: +hosted-git-info@^2.1.4: + version "2.8.9" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.9.tgz#dffc0bf9a21c02209090f2aa69429e1414daf3f9" + integrity sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw== + +hosted-git-info@^2.1.5, hosted-git-info@^2.4.2: version "2.8.5" resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.5.tgz#759cfcf2c4d156ade59b0b2dfabddc42a6b9c70c" integrity sha512-kssjab8CvdXfcXMXVcvsXum4Hwdq9XGtRD3TteMEvEbq0LXyiNQr6AprqKqfeaDXze7SxWvRxdpwE6ku7ikLkg== @@ -3492,9 +3536,9 @@ imurmurhash@^0.1.4: integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= in-publish@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.0.tgz#e20ff5e3a2afc2690320b6dc552682a9c7fadf51" - integrity sha1-4g/146KvwmkDILbcVSaCqcf631E= + version "2.0.1" + resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.1.tgz#948b1a535c8030561cea522f73f78f4be357e00c" + integrity sha512-oDM0kUSNFC31ShNxHKUyfZKy8ZeXZBWMjMdZHKLOk13uvT27VTL/QzRGfRUcevJhpkZAvlhPYuXkF7eNWrtyxQ== include-path-searcher@^0.1.0: version "0.1.0" @@ -3595,6 +3639,13 @@ is-builtin-module@^1.0.0: dependencies: builtin-modules "^1.0.0" +is-core-module@^2.2.0: + version "2.4.0" + resolved 
"https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.4.0.tgz#8e9fc8e15027b011418026e98f0e6f4d86305cc1" + integrity sha512-6A2fkfq1rfeQZjxrZJGerpLCTHRNEBiSgnu0+obeJpEPZRUooHgsizvzv0ZjJwOz3iWIHdJtVWJ/tmPr3D21/A== + dependencies: + has "^1.0.3" + is-dotfile@^1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" @@ -3618,11 +3669,9 @@ is-extglob@^1.0.0: integrity sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA= is-finite@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa" - integrity sha1-zGZ3aVYCvlUO8R6LSqYwU0K20Ko= - dependencies: - number-is-nan "^1.0.0" + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.1.0.tgz#904135c77fb42c0641d6aa1bcdbc4daa8da082f3" + integrity sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w== is-fullwidth-code-point@^1.0.0: version "1.0.0" @@ -3767,9 +3816,9 @@ istextorbinary@2.1.0: textextensions "1 || 2" js-base64@^2.1.8: - version "2.5.1" - resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.5.1.tgz#1efa39ef2c5f7980bb1784ade4a8af2de3291121" - integrity sha512-M7kLczedRMYX4L8Mdh4MzyAMM9O5osx+4FcOQuTvr3A9F2D9S5JXheN0ewNbrvK2UatkTRhL5ejGmGSjNMiZuw== + version "2.6.4" + resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.6.4.tgz#f4e686c5de1ea1f867dbcad3d46d969428df98c4" + integrity sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ== js-tokens@1.0.1: version "1.0.1" @@ -3977,6 +4026,14 @@ loader.js@4.2.3: resolved "https://registry.yarnpkg.com/loader.js/-/loader.js-4.2.3.tgz#845228877aa5317209e41f6c00d9bab36a6a4808" integrity sha1-hFIoh3qlMXIJ5B9sANm6s2pqSAg= +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + lockfile@~1.0.1: version "1.0.4" resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.4.tgz#07f819d25ae48f87e538e6578b6964a4981a5609" @@ -4228,9 +4285,9 @@ lodash@^3.10.0, lodash@^3.6.0, lodash@^3.9.3: integrity sha1-W/Rejkm6QYnhfUgnid/RW9FAt7Y= lodash@^4.0.0, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.5.1, lodash@~4.17.10, lodash@~4.17.11: - version "4.17.15" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" - integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== lodash@~2.3.0: version "2.3.0" @@ -4434,13 +4491,25 @@ mime-db@1.42.0, "mime-db@>= 1.40.0 < 2": resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.42.0.tgz#3e252907b4c7adb906597b4b65636272cf9e7bac" integrity sha512-UbfJCR4UAVRNgMpfImz05smAXK7+c+ZntjaA26ANtkXLlOe947Aag5zdIcKQULAiF9Cq4WxBi9jUs5zkA84bYQ== -mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.7: +mime-db@1.47.0: + version "1.47.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.47.0.tgz#8cb313e59965d3c05cfbf898915a267af46a335c" + integrity 
sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw== + +mime-types@^2.1.11, mime-types@~2.1.24, mime-types@~2.1.7: version "2.1.25" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.25.tgz#39772d46621f93e2a80a856c53b86a62156a6437" integrity sha512-5KhStqB5xpTAeGqKBAMgwaYMnQik7teQN4IAzC7npDv6kzeU6prfkR67bc87J1kWMPGkoaZSq1npmexMgkmEVg== dependencies: mime-db "1.42.0" +mime-types@^2.1.12, mime-types@~2.1.19: + version "2.1.30" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.30.tgz#6e7be8b4c479825f85ed6326695db73f9305d62d" + integrity sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg== + dependencies: + mime-db "1.47.0" + mime@1.6.0, mime@^1.2.11: version "1.6.0" resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" @@ -4481,11 +4550,16 @@ minimist@0.0.8: resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= -minimist@^1.1.0, minimist@^1.1.1, minimist@^1.1.3: +minimist@^1.1.0, minimist@^1.1.1: version "1.2.0" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ= +minimist@^1.1.3, minimist@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" + integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + minimist@~0.0.1: version "0.0.10" resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf" @@ -4499,11 +4573,11 @@ mkdirp@0.5.0: minimist "0.0.8" mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: - version "0.5.1" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" - integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= + version "0.5.5" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def" + integrity sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ== dependencies: - minimist "0.0.8" + minimist "^1.2.5" mkdirp@^0.3.5: version "0.3.5" @@ -4596,9 +4670,9 @@ mute-stream@~0.0.4: integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== nan@^2.13.2: - version "2.14.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.0.tgz#7818f722027b2459a86f0295d434d1fc2336c52c" - integrity sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg== + version "2.14.2" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" + integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== natives@^1.1.3: version "1.1.6" @@ -4669,9 +4743,9 @@ node-modules-path@^1.0.0: integrity sha512-6Gbjq+d7uhkO7epaKi5DNgUJn7H0gEyA4Jg0Mo1uQOi3Rk50G83LtmhhFyw0LxnAFhtlspkiiw52ISP13qzcBg== node-sass@^4.7.2: - version "4.13.0" - resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-4.13.0.tgz#b647288babdd6a1cb726de4545516b31f90da066" - integrity sha512-W1XBrvoJ1dy7VsvTAS5q1V45lREbTlZQqFbiHb3R3OTTCma0XBtuG6xZ6Z4506nR4lmHPTqVRwxT6KgtWC97CA== + version "4.14.1" + resolved 
"https://registry.yarnpkg.com/node-sass/-/node-sass-4.14.1.tgz#99c87ec2efb7047ed638fb4c9db7f3a42e2217b5" + integrity sha512-sjCuOlvGyCJS40R8BscF5vhVlQjNN069NtQ1gSxyK1u9iqvn6tf7O1R4GNowVZfiZUCRt5MmMs1xd+4V/7Yr0g== dependencies: async-foreach "^0.1.3" chalk "^1.1.1" @@ -4687,7 +4761,7 @@ node-sass@^4.7.2: node-gyp "^3.8.0" npmlog "^4.0.0" request "^2.88.0" - sass-graph "^2.2.4" + sass-graph "2.2.5" stdout-stream "^1.4.0" "true-case-path" "^1.0.2" @@ -5042,6 +5116,25 @@ output-file-sync@^1.1.0: mkdirp "^0.5.1" object-assign "^4.1.0" +p-limit@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + parse-glob@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" @@ -5111,6 +5204,11 @@ path-exists@^2.0.0: dependencies: pinkie-promise "^2.0.0" +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU= + path-is-absolute@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" @@ -5298,17 +5396,17 @@ pseudomap@^1.0.2: resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM= -psl@^1.1.24: - version "1.6.0" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.6.0.tgz#60557582ee23b6c43719d9890fb4170ecd91e110" - integrity sha512-SYKKmVel98NCOYXpkwUqZqh0ahZeeKfmisiLIcEZdsb+WbLv02g/dI5BUmZnIyOe7RzZtLax81nnb2HbvC2tzA== +psl@^1.1.28: + version "1.8.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.8.0.tgz#9326f8bcfb013adcc005fdff056acce020e51c24" + integrity sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ== punycode@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" integrity sha1-wNWmOycYgArY4esPpSachN1BhF4= -punycode@^2.1.0: +punycode@^2.1.0, punycode@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== @@ -5471,7 +5569,7 @@ read@1, read@~1.0.1, read@~1.0.7: dependencies: mute-stream "~0.0.4" -"readable-stream@1 || 2", readable-stream@^2, "readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.2.2: +"readable-stream@1 || 2", readable-stream@^2, "readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.2, readable-stream@^2.2.2: version "2.3.6" 
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw== @@ -5494,6 +5592,19 @@ readable-stream@1.1: isarray "0.0.1" string_decoder "~0.10.x" +readable-stream@^2.0.1, readable-stream@^2.0.6: + version "2.3.7" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" + integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + readable-stream@~1.1.13: version "1.1.14" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9" @@ -5671,9 +5782,9 @@ request-progress@~2.0.1: throttleit "^1.0.0" request@2, request@^2.27.0, request@^2.47.0, request@^2.87.0, request@^2.88.0: - version "2.88.0" - resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" - integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== + version "2.88.2" + resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" + integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== dependencies: aws-sign2 "~0.7.0" aws4 "^1.8.0" @@ -5682,7 +5793,7 @@ request@2, request@^2.27.0, request@^2.47.0, request@^2.87.0, request@^2.88.0: extend "~3.0.2" forever-agent "~0.6.1" form-data "~2.3.2" - har-validator "~5.1.0" + har-validator "~5.1.3" http-signature "~1.2.0" is-typedarray "~1.0.0" isstream "~0.1.2" @@ -5692,7 +5803,7 @@ request@2, request@^2.27.0, request@^2.47.0, request@^2.87.0, request@^2.88.0: performance-now "^2.1.0" qs "~6.5.2" safe-buffer "^5.1.2" - tough-cookie "~2.4.3" + tough-cookie "~2.5.0" tunnel-agent "^0.6.0" uuid "^3.3.2" @@ -5781,10 +5892,10 @@ require-directory@^2.1.1: resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= -require-main-filename@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" - integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE= +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== requires-port@^1.0.0: version "1.0.0" @@ -5799,13 +5910,21 @@ resolve-package-path@^1.0.11: path-root "^0.1.1" resolve "^1.10.0" -resolve@^1.1.2, resolve@^1.1.6, resolve@^1.1.7, resolve@^1.10.0, resolve@^1.8.1: +resolve@^1.1.2, resolve@^1.1.6, resolve@^1.1.7, resolve@^1.8.1: version "1.14.1" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.14.1.tgz#9e018c540fcf0c427d678b9931cbf45e984bcaff" integrity sha512-fn5Wobh4cxbLzuHaE+nphztHy43/b++4M6SsGFC2gB8uYwf0C8LcarfCz1un7UTW8OFQg9iNjZ4xpcFVGebDPg== dependencies: path-parse "^1.0.6" +resolve@^1.10.0: + version "1.20.0" + resolved 
"https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" + integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== + dependencies: + is-core-module "^2.2.0" + path-parse "^1.0.6" + retry@^0.8.0, retry@~0.8.0: version "0.8.0" resolved "https://registry.yarnpkg.com/retry/-/retry-0.8.0.tgz#2367628dc0edb247b1eab649dc53ac8628ac2d5f" @@ -5862,11 +5981,16 @@ safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== -safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.2: +safe-buffer@>=5.1.0: version "5.2.0" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.0.tgz#b74daec49b1148f88c64b68d49b1e815c1f2f519" integrity sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg== +safe-buffer@^5.0.1, safe-buffer@^5.1.2: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + "safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" @@ -5885,15 +6009,15 @@ sane@^1.1.1: walker "~1.0.5" watch "~0.10.0" -sass-graph@^2.2.4: - version "2.2.4" - resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.4.tgz#13fbd63cd1caf0908b9fd93476ad43a51d1e0b49" - integrity sha1-E/vWPNHK8JCLn9k0dq1DpR0eC0k= +sass-graph@2.2.5: + version "2.2.5" + resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.5.tgz#a981c87446b8319d96dce0671e487879bd24c2e8" + integrity sha512-VFWDAHOe6mRuT4mZRd4eKE+d8Uedrk6Xnh7Sh9b4NGufQLQjOrvf/MQoOdx+0s92L89FeyUUNfU597j/3uNpag== dependencies: glob "^7.0.0" lodash "^4.0.0" scss-tokenizer "^0.2.3" - yargs "^7.0.0" + yargs "^13.3.2" scss-tokenizer@^0.2.3: version "0.2.3" @@ -5990,7 +6114,12 @@ sigmund@~1.0.0: resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590" integrity sha1-P/IfGYytIXX587eBhT/ZTQ0ZtZA= -signal-exit@^3.0.0, signal-exit@^3.0.2: +signal-exit@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" + integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== + +signal-exit@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0= @@ -6138,22 +6267,22 @@ spawnback@~1.0.0: integrity sha1-9zZi9+VNlTZ+ynTWQmxnfdfqaG8= spdx-correct@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.0.tgz#fb83e504445268f154b074e218c87c003cd31df4" - integrity sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q== + version "3.1.1" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" + integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== 
dependencies: spdx-expression-parse "^3.0.0" spdx-license-ids "^3.0.0" spdx-exceptions@^2.1.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz#2ea450aee74f2a89bfb94519c07fcd6f41322977" - integrity sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA== + version "2.3.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" + integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== spdx-expression-parse@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" - integrity sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg== + version "3.0.1" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" + integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== dependencies: spdx-exceptions "^2.1.0" spdx-license-ids "^3.0.0" @@ -6164,9 +6293,9 @@ spdx-license-ids@^1.0.0: integrity sha1-yd96NCRZSt5r0RkA1ZZpbcBrrFc= spdx-license-ids@^3.0.0: - version "3.0.5" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654" - integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q== + version "3.0.7" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.7.tgz#e9c18a410e5ed7e12442a549fbd8afa767038d65" + integrity sha512-U+MTEOO0AiDzxwFvoa4JVnMV6mZlJKk2sBLt90s7G0Gd0Mlknc7kxEn3nuDPNZRta7O2uy8oLcZLVT+4sqNZHQ== spdx@~0.4.1: version "0.4.3" @@ -6222,7 +6351,7 @@ stdout-stream@^1.4.0: dependencies: readable-stream "^2.0.1" -string-width@^1.0.1, string-width@^1.0.2: +string-width@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= @@ -6239,6 +6368,15 @@ string-width@^1.0.1, string-width@^1.0.2: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" +string-width@^3.0.0, string-width@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + string.prototype.endswith@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/string.prototype.endswith/-/string.prototype.endswith-0.2.0.tgz#a19c20dee51a98777e9a47e10f09be393b9bba75" @@ -6299,6 +6437,13 @@ strip-ansi@^4.0.0: dependencies: ansi-regex "^3.0.0" +strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== + dependencies: + ansi-regex "^4.1.0" + strip-ansi@~0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991" @@ -6493,13 +6638,13 @@ tough-cookie@~2.3.0: dependencies: punycode "^1.4.1" 
-tough-cookie@~2.4.3: - version "2.4.3" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" - integrity sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ== +tough-cookie@~2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" + integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== dependencies: - psl "^1.1.24" - punycode "^1.4.1" + psl "^1.1.28" + punycode "^2.1.1" tree-sync@^1.2.2: version "1.4.0" @@ -6643,9 +6788,9 @@ unpipe@1.0.0, unpipe@~1.0.0: integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= uri-js@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" - integrity sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ== + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== dependencies: punycode "^2.1.0" @@ -6684,11 +6829,16 @@ uuid@^2.0.1: resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a" integrity sha1-Z+LoY3lyFVMN/zGOW/nc6/1Hsho= -uuid@^3.0.0, uuid@^3.3.2: +uuid@^3.0.0: version "3.3.3" resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.3.tgz#4568f0216e78760ee1dbf3a4d2cf53e224112866" integrity sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ== +uuid@^3.3.2: + version "3.4.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" + integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== + validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1: version "3.0.4" resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" @@ -6779,10 +6929,10 @@ websocket-extensions@>=0.1.1: resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29" integrity sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg== -which-module@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f" - integrity sha1-u6Y8qGGUiZT/MHc2CJ47lgJsKk8= +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= which@1, which@^1.2.8, which@^1.2.9: version "1.3.1" @@ -6832,13 +6982,14 @@ workerpool@^2.3.0: dependencies: object-assign "4.1.1" -wrap-ansi@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" - integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU= +wrap-ansi@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" + integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== dependencies: - string-width "^1.0.1" - 
strip-ansi "^3.0.1" + ansi-styles "^3.2.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" wrappy@1, wrappy@~1.0.1: version "1.0.2" @@ -6893,11 +7044,16 @@ xtend@^4.0.0: resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== -y18n@^3.2.0, y18n@^3.2.1: +y18n@^3.2.0: version "3.2.2" resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.2.tgz#85c901bd6470ce71fc4bb723ad209b70f7f28696" integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ== +y18n@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" + integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== + yallist@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" @@ -6912,31 +7068,29 @@ yam@0.0.18: fs-extra "^0.16.3" lodash.merge "^3.0.2" -yargs-parser@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-5.0.0.tgz#275ecf0d7ffe05c77e64e7c86e4cd94bf0e1228a" - integrity sha1-J17PDX/+Bcd+ZOfIbkzZS/DhIoo= +yargs-parser@^13.1.2: + version "13.1.2" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.2.tgz#130f09702ebaeef2650d54ce6e3e5706f7a4fb38" + integrity sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg== dependencies: - camelcase "^3.0.0" + camelcase "^5.0.0" + decamelize "^1.2.0" -yargs@^7.0.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-7.1.0.tgz#6ba318eb16961727f5d284f8ea003e8d6154d0c8" - integrity sha1-a6MY6xaWFyf10oT46gA+jWFU0Mg= +yargs@^13.3.2: + version "13.3.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.2.tgz#ad7ffefec1aa59565ac915f82dccb38a9c31a2dd" + integrity sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw== dependencies: - camelcase "^3.0.0" - cliui "^3.2.0" - decamelize "^1.1.1" - get-caller-file "^1.0.1" - os-locale "^1.4.0" - read-pkg-up "^1.0.1" + cliui "^5.0.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" require-directory "^2.1.1" - require-main-filename "^1.0.1" + require-main-filename "^2.0.0" set-blocking "^2.0.0" - string-width "^1.0.2" - which-module "^1.0.0" - y18n "^3.2.1" - yargs-parser "^5.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^13.1.2" yargs@~3.10.0: version "3.10.0" From 7d6e3eb1abdae858469dd46a6f57a1fc10e99a41 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 18 May 2021 22:55:47 -0700 Subject: [PATCH 330/666] HADOOP-17426. Upgrade to hadoop-thirdparty-1.1.0. 
(#3024) Signed-off-by: Akira Ajisaka Change-Id: Ibf27246b3eb1b074b938f44f4906db9184466303 --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 1f0be3b2976a3..b92a585470c1f 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -91,7 +91,7 @@ 3.7.1 ${env.HADOOP_PROTOC_PATH} - 1.1.0-SNAPSHOT + 1.1.0 ${hadoop-thirdparty.version} ${hadoop-thirdparty.version} org.apache.hadoop.thirdparty From 5929d827bbaea1d0fd5e1cfdbe5820c378ef136f Mon Sep 17 00:00:00 2001 From: lfengnan Date: Wed, 19 May 2021 10:53:42 -0700 Subject: [PATCH 331/666] HDFS-15757 RBF: Improving Router Connection Management (#2651) --- .../metrics/FederationRPCMBean.java | 15 +++++ .../metrics/FederationRPCMetrics.java | 10 +++ .../federation/router/ConnectionContext.java | 57 ++++++++++++---- .../federation/router/ConnectionManager.java | 53 +++++++++++++-- .../federation/router/ConnectionPool.java | 67 +++++++++++++++---- .../federation/router/RouterRpcClient.java | 18 +++++ .../router/TestConnectionManager.java | 28 +++++--- 7 files changed, 209 insertions(+), 39 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java index 3cde5e5b93cab..a4469a3025a11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java @@ -76,6 +76,21 @@ public interface FederationRPCMBean { */ int getRpcClientNumActiveConnections(); + /** + * Get the number of idle RPC connections between the Router and the NNs. + * @return Number of idle RPC connections between the Router and the NNs. + */ + int getRpcClientNumIdleConnections(); + + /** + * Get the number of recently active RPC connections between + * the Router and the NNs. + * + * @return Number of recently active RPC connections between + * the Router and the NNs. + */ + int getRpcClientNumActiveConnectionsRecently(); + /** * Get the number of RPC connections to be created. * @return Number of RPC connections to be created. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java index 887d50bf3d5d5..1e6aa8050d92e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java @@ -208,6 +208,16 @@ public int getRpcClientNumActiveConnections() { return rpcServer.getRPCClient().getNumActiveConnections(); } + @Override + public int getRpcClientNumIdleConnections() { + return rpcServer.getRPCClient().getNumIdleConnections(); + } + + @Override + public int getRpcClientNumActiveConnectionsRecently() { + return rpcServer.getRPCClient().getNumActiveConnectionsRecently(); + } + @Override public int getRpcClientNumCreatingConnections() { return rpcServer.getRPCClient().getNumCreatingConnections(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java index 02a3dbeb4ea53..9a5434b91ce2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java @@ -18,9 +18,13 @@ package org.apache.hadoop.hdfs.server.federation.router; import java.net.InetSocketAddress; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Context to track a connection in a {@link ConnectionPool}. When a client uses @@ -36,13 +40,19 @@ */ public class ConnectionContext { + private static final Logger LOG = + LoggerFactory.getLogger(ConnectionContext.class); + /** Client for the connection. */ private final ProxyAndInfo client; /** How many threads are using this connection. */ private int numThreads = 0; /** If the connection is closed. */ private boolean closed = false; - + /** Last timestamp the connection was active. */ + private long lastActiveTs = 0; + /** The connection's active status would expire after this window. */ + private final static long ACTIVE_WINDOW_TIME = TimeUnit.SECONDS.toMillis(30); public ConnectionContext(ProxyAndInfo connection) { this.client = connection; @@ -57,6 +67,16 @@ public synchronized boolean isActive() { return this.numThreads > 0; } + /** + * Check if the connection is/was active recently. + * + * @return True if the connection is active or + * was active in the past period of time. + */ + public synchronized boolean isActiveRecently() { + return Time.monotonicNow() - this.lastActiveTs <= ACTIVE_WINDOW_TIME; + } + /** * Check if the connection is closed. * @@ -83,30 +103,41 @@ public synchronized boolean isUsable() { */ public synchronized ProxyAndInfo getClient() { this.numThreads++; + this.lastActiveTs = Time.monotonicNow(); return this.client; } /** - * Release this connection. If the connection was closed, close the proxy. - * Otherwise, mark the connection as not used by us anymore. + * Release this connection. 
*/ public synchronized void release() { - if (--this.numThreads == 0 && this.closed) { - close(); + if (this.numThreads > 0) { + this.numThreads--; } } /** - * We will not use this connection anymore. If it's not being used, we close - * it. Otherwise, we let release() do it once we are done with it. + * Close a connection. Only idle connections can be closed since + * the RPC proxy would be shut down immediately. + * + * @param force whether the connection should be closed anyway. */ - public synchronized void close() { - this.closed = true; - if (this.numThreads == 0) { - Object proxy = this.client.getProxy(); - // Nobody should be using this anymore so it should close right away - RPC.stopProxy(proxy); + public synchronized void close(boolean force) { + if (!force && this.numThreads > 0) { + // this is an erroneous case but we have to close the connection + // anyway since there will be connection leak if we don't do so + // the connection has been moved out of the pool + LOG.error("Active connection with {} handlers will be closed", + this.numThreads); } + this.closed = true; + Object proxy = this.client.getProxy(); + // Nobody should be using this anymore so it should close right away + RPC.stopProxy(proxy); + } + + public synchronized void close() { + close(false); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 9ec3b54ed50b7..b773a79948edc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -281,6 +281,42 @@ public int getNumActiveConnections() { return total; } + /** + * Get number of idle connections. + * + * @return Number of active connections. + */ + public int getNumIdleConnections() { + int total = 0; + readLock.lock(); + try { + for (ConnectionPool pool : this.pools.values()) { + total += pool.getNumIdleConnections(); + } + } finally { + readLock.unlock(); + } + return total; + } + + /** + * Get number of recently active connections. + * + * @return Number of recently active connections. + */ + public int getNumActiveConnectionsRecently() { + int total = 0; + readLock.lock(); + try { + for (ConnectionPool pool : this.pools.values()) { + total += pool.getNumActiveConnectionsRecently(); + } + } finally { + readLock.unlock(); + } + return total; + } + /** * Get the number of connections to be created. * @@ -327,12 +363,21 @@ void cleanup(ConnectionPool pool) { // Check if the pool hasn't been active in a while or not 50% are used long timeSinceLastActive = Time.now() - pool.getLastActiveTime(); int total = pool.getNumConnections(); - int active = pool.getNumActiveConnections(); + // Active is a transient status in many cases for a connection since + // the handler thread uses the connection very quickly. Thus the number + // of connections with handlers using at the call time is constantly low. + // Recently active is more lasting status and it shows how many + // connections have been used with a recent time period. (i.e. 
30 seconds) + int active = pool.getNumActiveConnectionsRecently(); float poolMinActiveRatio = pool.getMinActiveRatio(); if (timeSinceLastActive > connectionCleanupPeriodMs || active < poolMinActiveRatio * total) { - // Remove and close 1 connection - List conns = pool.removeConnections(1); + // Be greedy here to close as many connections as possible in one shot + // The number should at least be 1 + int targetConnectionsCount = Math.max(1, + (int)(poolMinActiveRatio * total) - active); + List conns = + pool.removeConnections(targetConnectionsCount); for (ConnectionContext conn : conns) { conn.close(); } @@ -414,7 +459,7 @@ public void run() { ConnectionPool pool = this.queue.take(); try { int total = pool.getNumConnections(); - int active = pool.getNumActiveConnections(); + int active = pool.getNumActiveConnectionsRecently(); float poolMinActiveRatio = pool.getMinActiveRatio(); if (pool.getNumConnections() < pool.getMaxSize() && active >= poolMinActiveRatio * total) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index 52e7cebd26017..827e62ce3ef1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -252,19 +252,23 @@ public synchronized void addConnection(ConnectionContext conn) { */ public synchronized List removeConnections(int num) { List removed = new LinkedList<>(); - - // Remove and close the last connection - List tmpConnections = new ArrayList<>(); - for (int i=0; i this.minSize) { + int targetCount = Math.min(num, this.connections.size() - this.minSize); + // Remove and close targetCount of connections + List tmpConnections = new ArrayList<>(); + for (int i = 0; i < this.connections.size(); i++) { + ConnectionContext conn = this.connections.get(i); + // Only pick idle connections to close + if (removed.size() < targetCount && conn.isUsable()) { + removed.add(conn); + } else { + tmpConnections.add(conn); + } } + this.connections = tmpConnections; } - this.connections = tmpConnections; - + LOG.debug("Expected to remove {} connection " + + "and actually removed {} connections", num, removed.size()); return removed; } @@ -278,7 +282,7 @@ protected synchronized void close() { this.connectionPoolId, timeSinceLastActive); for (ConnectionContext connection : this.connections) { - connection.close(); + connection.close(true); } this.connections.clear(); } @@ -309,6 +313,39 @@ protected int getNumActiveConnections() { return ret; } + /** + * Number of usable i.e. no active thread connections. + * + * @return Number of idle connections + */ + protected int getNumIdleConnections() { + int ret = 0; + + List tmpConnections = this.connections; + for (ConnectionContext conn : tmpConnections) { + if (conn.isUsable()) { + ret++; + } + } + return ret; + } + + /** + * Number of active connections recently in the pool. + * + * @return Number of active connections recently. + */ + protected int getNumActiveConnectionsRecently() { + int ret = 0; + List tmpConnections = this.connections; + for (ConnectionContext conn : tmpConnections) { + if (conn.isActiveRecently()) { + ret++; + } + } + return ret; + } + /** * Get the last time the connection pool was used. 
* @@ -331,12 +368,18 @@ public String toString() { public String getJSON() { final Map info = new LinkedHashMap<>(); info.put("active", Integer.toString(getNumActiveConnections())); + info.put("recent_active", + Integer.toString(getNumActiveConnectionsRecently())); + info.put("idle", Integer.toString(getNumIdleConnections())); info.put("total", Integer.toString(getNumConnections())); if (LOG.isDebugEnabled()) { List tmpConnections = this.connections; for (int i=0; i Date: Thu, 20 May 2021 10:47:33 +0900 Subject: [PATCH 332/666] HADOOP-17663. Remove useless property hadoop.assemblies.version in pom file. (#3025) Reviewed-by: Wei-Chiu Chuang --- hadoop-project/pom.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index b92a585470c1f..fc85ff9c021b3 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -52,7 +52,6 @@ 2.4.0 - 3.4.0-SNAPSHOT 1.0.13 ${project.build.directory}/test-dir From 035996c7258c37412f999944099a3d6204ec65e4 Mon Sep 17 00:00:00 2001 From: Hongbing Wang <284734261@qq.com> Date: Thu, 20 May 2021 11:23:54 +0800 Subject: [PATCH 333/666] =?UTF-8?q?HDFS-16018.=20Optimize=20the=20display?= =?UTF-8?q?=20of=20hdfs=20"count=20-e"=20or=20"count=20-t"=20com=E2=80=A6?= =?UTF-8?q?=20(#2994)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../org/apache/hadoop/fs/ContentSummary.java | 25 ++++++++++++++++ .../java/org/apache/hadoop/fs/QuotaUsage.java | 12 ++++---- .../org/apache/hadoop/fs/shell/Count.java | 12 +++----- .../org/apache/hadoop/fs/shell/TestCount.java | 29 +++++++++---------- 4 files changed, 50 insertions(+), 28 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java index 20e205a8b32cf..80a6c0c05b6dd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java @@ -281,6 +281,20 @@ public int hashCode() { private static final String ALL_HEADER = QUOTA_HEADER + SUMMARY_HEADER; + /** + * Output format: + * <--------20--------> + * ERASURECODING_POLICY + */ + private static final String ERASURECODING_POLICY_FORMAT = "%20s "; + + private static final String ERASURECODING_POLICY_HEADER_FIELD = + "ERASURECODING_POLICY"; + + /** The header string. */ + private static final String ERASURECODING_POLICY_HEADER = String.format( + ERASURECODING_POLICY_FORMAT, ERASURECODING_POLICY_HEADER_FIELD); + /** * Output format:<-------18-------> <----------24----------> * <----------24---------->. <-------------28------------> SNAPSHOT_LENGTH @@ -308,6 +322,10 @@ public static String getHeader(boolean qOption) { return qOption ? ALL_HEADER : SUMMARY_HEADER; } + public static String getErasureCodingPolicyHeader() { + return ERASURECODING_POLICY_HEADER; + } + public static String getSnapshotHeader() { return SNAPSHOT_HEADER; } @@ -444,6 +462,13 @@ private String formatSize(long size, boolean humanReadable) { : String.valueOf(size); } + /** + * @return Constant-width String representation of Erasure Coding Policy + */ + public String toErasureCodingPolicy() { + return String.format(ERASURECODING_POLICY_FORMAT, erasureCodingPolicy); + } + /** * Return the string representation of the snapshot counts in the output * format. 
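The new ERASURECODING_POLICY_FORMAT ("%20s ") above, and the widened "%14s %18s " storage-type columns in the QuotaUsage change that follows, both rely on the same trick: the header and every row value go through one shared format string, so the columns of "hdfs dfs -count -e" and "-count -t" stay aligned whatever the value length. A small illustration of the idea; the paths are made up, and "Replicated" / "RS-6-3-1024k" are just sample policy strings.

    // Header and rows share one width specifier, so the column lines up
    // regardless of how long the policy name is. Sample values only.
    class FixedWidthColumnDemo {
      private static final String EC_POLICY_FORMAT = "%20s ";

      public static void main(String[] args) {
        System.out.printf(EC_POLICY_FORMAT + "%s%n",
            "ERASURECODING_POLICY", "PATHNAME");
        System.out.printf(EC_POLICY_FORMAT + "%s%n",
            "Replicated", "/user/alice");
        System.out.printf(EC_POLICY_FORMAT + "%s%n",
            "RS-6-3-1024k", "/user/bob/ec");
      }
    }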
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java index 11cc93401748e..b00a31891c867 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java @@ -242,12 +242,14 @@ public boolean equals(Object obj) { /** * Output format: - * |----12----| |------15-----| |------15-----| |------15-----| - * QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA - * |----12----| |----12----| |-------18-------| - * DIR_COUNT FILE_COUNT CONTENT_SIZE + * |-----14-----| |-------18------| |-----14-----| |-------18------| + * SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA + * |-----14-----| |-------18------| |-----14-----| |-------18------| + * ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA + * |-----14-----| |-------18------| |-------18------| + * NVDIMM_QUOTA REM_NVDIMM_QUOTA PATHNAME */ - private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s "; + private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%14s %18s "; /** Return the header of the output. * @return the header of the output diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java index cb3858d63b2ec..bd7be229725e7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java @@ -166,8 +166,8 @@ protected void processOptions(LinkedList args) { headString.append(ContentSummary.getHeader(showQuotas)); } } - if(displayECPolicy){ - headString.append("ERASURECODING_POLICY "); + if (displayECPolicy) { + headString.append(ContentSummary.getErasureCodingPolicyHeader()); } if (showSnapshot) { headString.append(ContentSummary.getSnapshotHeader()); @@ -204,13 +204,9 @@ protected void processPath(PathData src) throws IOException { outputString.append(summary.toString( showQuotas, isHumanReadable(), excludeSnapshots)); } - if(displayECPolicy){ + if (displayECPolicy) { ContentSummary summary = src.fs.getContentSummary(src.path); - if(!summary.getErasureCodingPolicy().equals("Replicated")){ - outputString.append("EC:"); - } - outputString.append(summary.getErasureCodingPolicy()) - .append(" "); + outputString.append(summary.toErasureCodingPolicy()); } if (showSnapshot) { ContentSummary summary = src.fs.getContentSummary(src.path); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java index 19516b8c40799..c86a4c89dfb9f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java @@ -282,11 +282,10 @@ public void processPathWithQuotasByStorageTypesHeader() throws Exception { options.add("dummy"); count.processOptions(options); String withStorageTypeHeader = - // <----13---> <-------17------> <----13-----> <------17-------> - " SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " + - // <----13---> <-------17------> - "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " + - " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + + // 
<----14----> <-------18-------> <-----14-----> <-------18-------> + " SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " + + " ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " + + " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); @@ -311,8 +310,8 @@ public void processPathWithQuotasBySSDStorageTypesHeader() throws Exception { options.add("dummy"); count.processOptions(options); String withStorageTypeHeader = - // <----13---> <-------17------> - " SSD_QUOTA REM_SSD_QUOTA " + + // <----14----> <-------18-------> + " SSD_QUOTA REM_SSD_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); @@ -337,12 +336,12 @@ public void processPathWithQuotasByQTVH() throws Exception { options.add("dummy"); count.processOptions(options); String withStorageTypeHeader = - // <----13---> <-------17------> - " SSD_QUOTA REM_SSD_QUOTA " + - " DISK_QUOTA REM_DISK_QUOTA " + - "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " + + // <----14----> <-------18-------> + " SSD_QUOTA REM_SSD_QUOTA " + + " DISK_QUOTA REM_DISK_QUOTA " + + " ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " + "PROVIDED_QUOTA REM_PROVIDED_QUOTA " + - " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + + " NVDIMM_QUOTA REM_NVDIMM_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); @@ -405,9 +404,9 @@ public void processPathWithQuotasByMultipleStorageTypes() throws Exception { options.add("dummy"); count.processOptions(options); String withStorageTypeHeader = - // <----13---> <------17-------> - " SSD_QUOTA REM_SSD_QUOTA " + - " DISK_QUOTA REM_DISK_QUOTA " + + // <----14----> <------18--------> + " SSD_QUOTA REM_SSD_QUOTA " + + " DISK_QUOTA REM_DISK_QUOTA " + "PATHNAME"; verify(out).println(withStorageTypeHeader); verifyNoMoreInteractions(out); From d4be8da2ea92e6fbe41bfd2d16cc9c40c2e34f7e Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 20 May 2021 21:17:04 +0530 Subject: [PATCH 334/666] HADOOP-17115. 
Replace Guava Sets usage by Hadoop's own Sets in hadoop-common and hadoop-tools (#2985) Signed-off-by: Sean Busbey --- hadoop-common-project/hadoop-common/pom.xml | 32 ++ .../hadoop/metrics2/lib/MutableRates.java | 2 +- .../lib/MutableRatesWithAggregation.java | 2 +- .../apache/hadoop/util/JvmPauseMonitor.java | 2 - .../java/org/apache/hadoop/util/Sets.java | 377 ++++++++++++++++++ .../hadoop/crypto/key/TestValueQueue.java | 2 +- .../TestLoadBalancingKMSClientProvider.java | 2 +- .../apache/hadoop/test/GenericTestUtils.java | 2 +- hadoop-common-project/hadoop-kms/pom.xml | 32 ++ .../crypto/key/kms/server/KMSAudit.java | 2 +- hadoop-tools/hadoop-aws/pom.xml | 32 ++ .../auth/AssumedRoleCredentialProvider.java | 2 +- .../hadoop/fs/s3a/commit/staging/Paths.java | 4 +- .../fs/s3a/TestS3AAWSCredentialsProvider.java | 2 +- .../integration/ITestS3ACommitterMRJob.java | 2 +- .../commit/staging/TestStagingCommitter.java | 2 +- .../TestStagingPartitionedTaskCommit.java | 2 +- hadoop-tools/hadoop-distcp/pom.xml | 32 ++ .../org/apache/hadoop/tools/CopyListing.java | 2 +- .../dynamometer/TestDynamometerInfra.java | 2 +- hadoop-tools/hadoop-dynamometer/pom.xml | 32 ++ 21 files changed, 552 insertions(+), 17 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Sets.java diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index faf1511e57f23..e7ad1344a8f27 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -649,6 +649,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java index c31c2e67f8f31..a83b68bab7aa5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java @@ -22,7 +22,7 @@ import java.util.Set; import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java index 7795343de3c20..dc37f96f4f449 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import java.lang.ref.WeakReference; import 
java.lang.reflect.Method; import java.util.Iterator; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java index 9c9953fb14c6d..a792993493bca 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java @@ -29,10 +29,8 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Sets.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Sets.java new file mode 100644 index 0000000000000..bddcbeb21f26a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Sets.java @@ -0,0 +1,377 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util; + +import org.apache.hadoop.classification.InterfaceAudience; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Static utility methods pertaining to {@link Set} instances. + * This class is Hadoop's internal use alternative to Guava's Sets + * utility class. + * Javadocs for majority of APIs in this class are taken from Guava's Sets + * class from Guava release version 27.0-jre. + */ +@InterfaceAudience.Private +public final class Sets { + + private static final int MAX_POWER_OF_TWO = 1 << (Integer.SIZE - 2); + + private Sets() { + // empty + } + + /** + * Creates a mutable, initially empty {@code HashSet} instance. + * + *

<p>Note: if mutability is not required, use ImmutableSet#of() + * instead. If {@code E} is an {@link Enum} type, use {@link EnumSet#noneOf} + * instead. Otherwise, strongly consider using a {@code LinkedHashSet} + * instead, at the cost of increased memory footprint, to get + * deterministic iteration behavior. + */ + public static <E> HashSet<E> newHashSet() { + return new HashSet<E>(); + } + + /** + * Creates a mutable, empty {@code TreeSet} instance sorted by the + * natural sort ordering of its elements. + * + *

<p>Note: if mutability is not required, use ImmutableSortedSet#of() + * instead. + * + * @return a new, empty {@code TreeSet} + */ + public static <E extends Comparable> TreeSet<E> newTreeSet() { + return new TreeSet<E>(); + } + + /** + * Creates a mutable {@code HashSet} instance initially containing + * the given elements. + * + *

    Note: if elements are non-null and won't be added or removed + * after this point, use ImmutableSet#of() or ImmutableSet#copyOf(Object[]) + * instead. If {@code E} is an {@link Enum} type, use + * {@link EnumSet#of(Enum, Enum[])} instead. Otherwise, strongly consider + * using a {@code LinkedHashSet} instead, at the cost of increased memory + * footprint, to get deterministic iteration behavior. + * + *

    This method is just a small convenience, either for + * {@code newHashSet(}{@link Arrays#asList}{@code (...))}, or for creating an + * empty set then calling {@link Collections#addAll}. + */ + @SafeVarargs + public static HashSet newHashSet(E... elements) { + HashSet set = newHashSetWithExpectedSize(elements.length); + Collections.addAll(set, elements); + return set; + } + + /** + * Creates a mutable {@code HashSet} instance containing the given + * elements. A very thin convenience for creating an empty set then calling + * {@link Collection#addAll} or Iterables#addAll. + * + *

    Note: if mutability is not required and the elements are + * non-null, use ImmutableSet#copyOf(Iterable) instead. (Or, change + * {@code elements} to be a FluentIterable and call {@code elements.toSet()}.) + * + *

    Note: if {@code E} is an {@link Enum} type, use + * newEnumSet(Iterable, Class) instead. + */ + public static HashSet newHashSet(Iterable elements) { + return (elements instanceof Collection) + ? new HashSet(cast(elements)) + : newHashSet(elements.iterator()); + } + + /** + * Creates a mutable {@code TreeSet} instance containing the given + * elements sorted by their natural ordering. + * + *

    Note: if mutability is not required, use + * ImmutableSortedSet#copyOf(Iterable) instead. + * + *

    Note: If {@code elements} is a {@code SortedSet} with an + * explicit comparator, this method has different behavior than + * {@link TreeSet#TreeSet(SortedSet)}, which returns a {@code TreeSet} + * with that comparator. + * + *

    Note for Java 7 and later: this method is now unnecessary and + * should be treated as deprecated. Instead, use the {@code TreeSet} + * constructor directly, taking advantage of the new + * "diamond" syntax. + * + *

    This method is just a small convenience for creating an empty set and + * then calling Iterables#addAll. This method is not very useful and will + * likely be deprecated in the future. + * + * @param elements the elements that the set should contain + * @return a new {@code TreeSet} containing those elements (minus duplicates) + */ + public static TreeSet newTreeSet( + Iterable elements) { + TreeSet set = newTreeSet(); + addAll(set, elements); + return set; + } + + private static boolean addAll(TreeSet addTo, + Iterable elementsToAdd) { + if (elementsToAdd instanceof Collection) { + Collection c = cast(elementsToAdd); + return addTo.addAll(c); + } + if (elementsToAdd == null) { + throw new NullPointerException(); + } + return addAll(addTo, elementsToAdd.iterator()); + } + + /** + * Creates a mutable {@code HashSet} instance containing the given + * elements. A very thin convenience for creating an empty set and then + * calling Iterators#addAll. + * + *

    Note: if mutability is not required and the elements are + * non-null, use ImmutableSet#copyOf(Iterator) instead. + * + *

    Note: if {@code E} is an {@link Enum} type, you should create + * an {@link EnumSet} instead. + * + *

    Overall, this method is not very useful and will likely be deprecated + * in the future. + */ + public static HashSet newHashSet(Iterator elements) { + HashSet set = newHashSet(); + addAll(set, elements); + return set; + } + + /** + * Returns a new hash set using the smallest initial table size that can hold + * {@code expectedSize} elements without resizing. Note that this is not what + * {@link HashSet#HashSet(int)} does, but it is what most users want and + * expect it to do. + * + *

    This behavior can't be broadly guaranteed, but has been tested with + * OpenJDK 1.7 and 1.8. + * + * @param expectedSize the number of elements you expect to add to the + * returned set + * @return a new, empty hash set with enough capacity to hold + * {@code expectedSize} elements without resizing + * @throws IllegalArgumentException if {@code expectedSize} is negative + */ + public static HashSet newHashSetWithExpectedSize(int expectedSize) { + return new HashSet(capacity(expectedSize)); + } + + private static Collection cast(Iterable iterable) { + return (Collection) iterable; + } + + private static boolean addAll(Collection addTo, + Iterator iterator) { + if (addTo == null) { + throw new NullPointerException(); + } + if (iterator == null) { + throw new NullPointerException(); + } + boolean wasModified = false; + while (iterator.hasNext()) { + wasModified |= addTo.add(iterator.next()); + } + return wasModified; + } + + /** + * Returns the intersection of two sets as an unmodifiable set. + * The returned set contains all elements that are contained by both backing + * sets. + * + *

<p>Results are undefined if {@code set1} and {@code set2} are sets based + * on different equivalence relations (as {@code HashSet}, {@code TreeSet}, + * and the keySet of an {@code IdentityHashMap} all are). + */ + public static <E> Set<E> intersection(final Set<E> set1, + final Set<E> set2) { + if (set1 == null) { + throw new NullPointerException("set1"); + } + if (set2 == null) { + throw new NullPointerException("set2"); + } + Set<E> newSet = new HashSet<>(set1); + newSet.retainAll(set2); + return Collections.unmodifiableSet(newSet); + } + + /** + * Returns the union of two sets as an unmodifiable set. + * The returned set contains all elements that are contained in either + * backing set. + * + *

<p>Results are undefined if {@code set1} and {@code set2} are sets + * based on different equivalence relations (as {@link HashSet}, + * {@link TreeSet}, and the {@link Map#keySet} of an + * {@code IdentityHashMap} all are). + */ + public static <E> Set<E> union( + final Set<E> set1, final Set<E> set2) { + if (set1 == null) { + throw new NullPointerException("set1"); + } + if (set2 == null) { + throw new NullPointerException("set2"); + } + Set<E> newSet = new HashSet<>(set1); + newSet.addAll(set2); + return Collections.unmodifiableSet(newSet); + } + + /** + * Returns the difference of two sets as an unmodifiable set. + * The returned set contains all elements that are contained by {@code set1} + * and not contained by {@code set2}. + * + *

    Results are undefined if {@code set1} and {@code set2} are sets based + * on different equivalence relations (as {@code HashSet}, {@code TreeSet}, + * and the keySet of an {@code IdentityHashMap} all are). + * + * This method is used to find difference for HashSets. For TreeSets with + * strict order requirement, recommended method is + * {@link #differenceInTreeSets(Set, Set)}. + */ + public static Set difference( + final Set set1, final Set set2) { + if (set1 == null) { + throw new NullPointerException("set1"); + } + if (set2 == null) { + throw new NullPointerException("set2"); + } + Set newSet = new HashSet<>(set1); + newSet.removeAll(set2); + return Collections.unmodifiableSet(newSet); + } + + /** + * Returns the difference of two sets as an unmodifiable set. + * The returned set contains all elements that are contained by {@code set1} + * and not contained by {@code set2}. + * + *

    Results are undefined if {@code set1} and {@code set2} are sets based + * on different equivalence relations (as {@code HashSet}, {@code TreeSet}, + * and the keySet of an {@code IdentityHashMap} all are). + * + * This method is used to find difference for TreeSets. For HashSets, + * recommended method is {@link #difference(Set, Set)}. + */ + public static Set differenceInTreeSets( + final Set set1, final Set set2) { + if (set1 == null) { + throw new NullPointerException("set1"); + } + if (set2 == null) { + throw new NullPointerException("set2"); + } + Set newSet = new TreeSet<>(set1); + newSet.removeAll(set2); + return Collections.unmodifiableSet(newSet); + } + + /** + * Returns the symmetric difference of two sets as an unmodifiable set. + * The returned set contains all elements that are contained in either + * {@code set1} or {@code set2} but not in both. The iteration order of the + * returned set is undefined. + * + *

    Results are undefined if {@code set1} and {@code set2} are sets based + * on different equivalence relations (as {@code HashSet}, {@code TreeSet}, + * and the keySet of an {@code IdentityHashMap} all are). + */ + public static Set symmetricDifference( + final Set set1, final Set set2) { + if (set1 == null) { + throw new NullPointerException("set1"); + } + if (set2 == null) { + throw new NullPointerException("set2"); + } + Set intersection = new HashSet<>(set1); + intersection.retainAll(set2); + Set symmetricDifference = new HashSet<>(set1); + symmetricDifference.addAll(set2); + symmetricDifference.removeAll(intersection); + return Collections.unmodifiableSet(symmetricDifference); + } + + /** + * Creates a thread-safe set backed by a hash map. The set is backed by a + * {@link ConcurrentHashMap} instance, and thus carries the same concurrency + * guarantees. + * + *

    Unlike {@code HashSet}, this class does NOT allow {@code null} to be + * used as an element. The set is serializable. + * + * @return a new, empty thread-safe {@code Set} + */ + public static Set newConcurrentHashSet() { + return Collections.newSetFromMap(new ConcurrentHashMap()); + } + + /** + * Returns a capacity that is sufficient to keep the map from being resized + * as long as it grows no larger than expectedSize and the load factor + * is ≥ its default (0.75). + * The implementation of this method is adapted from Guava version 27.0-jre. + */ + private static int capacity(int expectedSize) { + if (expectedSize < 3) { + if (expectedSize < 0) { + throw new IllegalArgumentException( + "expectedSize cannot be negative but was: " + expectedSize); + } + return expectedSize + 1; + } + if (expectedSize < MAX_POWER_OF_TWO) { + // This is the calculation used in JDK8 to resize when a putAll + // happens; it seems to be the most conservative calculation we + // can make. 0.75 is the default load factor. + return (int) ((float) expectedSize / 0.75F + 1.0F); + } + return Integer.MAX_VALUE; // any large value + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java index 9b8638faa4b22..5da973c6a761d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java @@ -32,7 +32,7 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; public class TestValueQueue { Logger LOG = LoggerFactory.getLogger(TestValueQueue.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index 3684a1d0db68d..9122389bd6e54 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -63,7 +63,7 @@ import org.junit.rules.Timeout; import org.mockito.Mockito; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; public class TestLoadBalancingKMSClientProvider { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 8b1750def78b8..effb2608a2687 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -72,7 +72,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.util.functional.CommonCallableSupplier.submit; diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
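For call sites, the switch shown above is mostly an import change: the maven-enforcer restrict-imports rule added to the pom files bans com.google.common.collect.Sets (and its shaded thirdparty variant), while org.apache.hadoop.util.Sets keeps the familiar method names. A minimal usage sketch of the new utility, assuming it is on the classpath; the element values are arbitrary.

    import java.util.Set;
    import org.apache.hadoop.util.Sets;

    // Usage sketch of the replacement utility defined above. Unlike Guava's
    // lazy views, intersection/union/difference here return unmodifiable
    // copies, per the javadoc in Sets.java. Values are arbitrary examples.
    public class SetsUsageSketch {
      public static void main(String[] args) {
        Set<String> configured = Sets.newHashSet("a", "b", "c");
        Set<String> reported = Sets.newHashSet("b", "c", "d");

        Set<String> common = Sets.intersection(configured, reported); // "b","c"
        Set<String> missing = Sets.difference(configured, reported);  // "a"
        Set<String> all = Sets.union(configured, reported);           // "a".."d"

        System.out.println(common + " " + missing + " " + all);
      }
    }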
b/hadoop-common-project/hadoop-kms/pom.xml index 9de8b9caf6e68..c932bae2fbc7f 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -244,6 +244,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java index 4c64a37feabbd..3a11ec134f619 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java @@ -36,7 +36,7 @@ import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.HashSet; diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 0cab5ada2169d..1d82e44aece2e 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -467,6 +467,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java index c467702575464..53ac4494b7786 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java @@ -33,7 +33,7 @@ import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java index 5f9e6e21363ee..c32a06a418aff 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java @@ -19,6 +19,7 @@ package 
org.apache.hadoop.fs.s3a.commit.staging; import java.io.IOException; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -27,7 +28,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException; import org.apache.commons.lang3.StringUtils; @@ -290,7 +290,7 @@ public static Set getPartitions(Path attemptPath, List taskOutput) throws IOException { // get a list of partition directories - Set partitions = Sets.newLinkedHashSet(); + Set partitions = new LinkedHashSet<>(); for (FileStatus fileStatus : taskOutput) { // sanity check the output paths Path outputFile = fileStatus.getPath(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java index 08d5e0db0d2f0..f273e68371e58 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java @@ -30,7 +30,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.EnvironmentVariableCredentialsProvider; import com.amazonaws.auth.InstanceProfileCredentialsProvider; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java index 79838d6f00b46..aa068c08d667c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java @@ -34,7 +34,7 @@ import java.util.UUID; import java.util.stream.Collectors; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.assertj.core.api.Assertions; import org.junit.FixMethodOrder; import org.junit.Rule; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java index 15ab89286d65a..94c0b29a3cdcc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java @@ -35,7 +35,7 @@ import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.assertj.core.api.Assertions; import org.hamcrest.core.StringStartsWith; import org.junit.After; diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java index 2f46b72fbaecb..9e98a226ef150 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java @@ -26,7 +26,7 @@ import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.assertj.core.api.Assertions; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 7e5aaebc08513..da598fc341ed4 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -223,6 +223,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java index 164d014876dbd..9f2f62d44a02c 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java @@ -35,7 +35,7 @@ import java.net.URI; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; /** * The CopyListing abstraction is responsible for how the list of diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index 092a54b78aaad..316a2bd276c61 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import java.util.Optional; import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeUnit; diff --git a/hadoop-tools/hadoop-dynamometer/pom.xml b/hadoop-tools/hadoop-dynamometer/pom.xml index 30e28b6bf6bfb..4811e695c31c7 100644 --- a/hadoop-tools/hadoop-dynamometer/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/pom.xml @@ -50,6 +50,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than 
Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + From 30903fe6d66919474c3ba031c872bd0b0757c941 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Fri, 21 May 2021 01:36:26 +0000 Subject: [PATCH 335/666] HADOOP-14922. Build of Mapreduce Native Task module fails with unknown opcode "bswap". Contributed by Anup Halarnkar. --- .../src/main/native/src/lib/primitives.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h index 3bf5f767ed2b7..000c4b91fd972 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/primitives.h @@ -99,6 +99,8 @@ inline void simple_memcpy(void * dest, const void * src, size_t len) { inline uint32_t bswap(uint32_t val) { #ifdef __aarch64__ __asm__("rev %w[dst], %w[src]" : [dst]"=r"(val) : [src]"r"(val)); +#elif defined(__ppc64__)||(__PPC64__)||(__powerpc64__) + return __builtin_bswap32(val); #else __asm__("bswap %0" : "=r" (val) : "0" (val)); #endif @@ -108,6 +110,8 @@ inline uint32_t bswap(uint32_t val) { inline uint64_t bswap64(uint64_t val) { #ifdef __aarch64__ __asm__("rev %[dst], %[src]" : [dst]"=r"(val) : [src]"r"(val)); +#elif defined(__ppc64__)||(__PPC64__)||(__powerpc64__) + return __builtin_bswap64(val); #else #ifdef __X64 __asm__("bswapq %0" : "=r" (val) : "0" (val)); From 0794d685a858823cba7194dd170303b24d3d0d83 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Fri, 21 May 2021 12:40:50 +0900 Subject: [PATCH 336/666] YARN-9279. Remove the old hamlet package. 
(#2986) Signed-off-by: Takanobu Asanuma --- .../hadoop-yarn/hadoop-yarn-common/pom.xml | 30 - .../hadoop/yarn/webapp/hamlet/Hamlet.java | 30561 ---------------- .../hadoop/yarn/webapp/hamlet/HamletGen.java | 451 - .../hadoop/yarn/webapp/hamlet/HamletImpl.java | 387 - .../hadoop/yarn/webapp/hamlet/HamletSpec.java | 3103 -- .../yarn/webapp/hamlet/package-info.java | 27 - .../yarn/webapp/hamlet/TestParseSelector.java | 57 - .../{hamlet => hamlet2}/TestHamlet.java | 84 +- .../{hamlet => hamlet2}/TestHamletImpl.java | 75 +- .../webapp/hamlet2/TestParseSelector.java | 57 + 10 files changed, 135 insertions(+), 34697 deletions(-) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/{hamlet => hamlet2}/TestHamlet.java (65%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/{hamlet => hamlet2}/TestHamletImpl.java (55%) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestParseSelector.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index 5ab84a97677e7..195999fe1ff38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -378,34 +378,4 @@ - - - - java9 - - [9,) - - - - - maven-compiler-plugin - - - org/apache/hadoop/yarn/webapp/hamlet/** - - - org/apache/hadoop/yarn/webapp/hamlet/** - - - - - maven-javadoc-plugin - - org.apache.hadoop.yarn.webapp.hamlet - - - - - - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java deleted file mode 100644 index 7213865361232..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java +++ /dev/null @@ -1,30561 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. 
You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -// Generated by HamletGen. Do NOT edit! -package org.apache.hadoop.yarn.webapp.hamlet; -import static java.util.EnumSet.of; -import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.ENDTAG; -import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.INLINE; -import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.PRE; - -import java.io.PrintWriter; -import java.util.EnumSet; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.yarn.webapp.SubView; - -/** - * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. - */ -@Deprecated -@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) -public class Hamlet extends HamletImpl implements HamletSpec._Html { - public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) { - super(out, nestLevel, wasInline); - } - - static EnumSet opt(boolean endTag, boolean inline, boolean pre) { - EnumSet opts = of(ENDTAG); - if (!endTag) opts.remove(ENDTAG); - if (inline) opts.add(INLINE); - if (pre) opts.add(PRE); - return opts; - } - - public class HTML extends EImp implements HamletSpec.HTML { - public HTML(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public HTML $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public HTML $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public HEAD> head() { - closeAttrs(); - return head_(this, false); - } - - @Override - public BODY> body() { - closeAttrs(); - return body_(this, false); - } - - @Override - public BODY> body(String selector) { - return setSelector(body(), selector); - } - - @Override - public BASE> base() { - closeAttrs(); - return base_(this, false); - } - - @Override - public HTML base(String href) { - return base().$href(href)._(); - } - - @Override - public TITLE> title() { - closeAttrs(); - return title_(this, false); - } - - @Override - public HTML title(String cdata) { - return title()._(cdata)._(); - } - - @Override - public STYLE> style() { - closeAttrs(); - return style_(this, false); - } - - @Override - public HTML style(Object... 
lines) { - return style().$type("text/css")._(lines)._(); - } - - @Override - public LINK> link() { - closeAttrs(); - return link_(this, false); - } - - @Override - public HTML link(String href) { - return setLinkHref(link(), href)._(); - } - - @Override - public META> meta() { - closeAttrs(); - return meta_(this, false); - } - - @Override - public HTML meta(String name, String content) { - return meta().$name(name).$content(content)._(); - } - - @Override - public HTML meta_http(String header, String content) { - return meta().$http_equiv(header).$content(content)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, false); - } - - @Override - public HTML script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public HTML address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public HTML _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public HTML hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public HTML h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public HTML h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public HTML h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public HTML h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public HTML h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public HTML h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public HTML h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public HTML h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public HTML h5(String cdata) { - 
return h5()._(cdata)._(); - } - - @Override - public HTML h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public HTML h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public HTML h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public HTML ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public HTML del(String cdata) { - return del()._(cdata)._(); - } - } - - private HEAD head_(T e, boolean inline) { - return new HEAD("head", e, opt(true, inline, false)); } - - private BODY body_(T e, boolean inline) { - return new BODY("body", e, opt(true, inline, false)); } - - public class SCRIPT extends EImp implements HamletSpec.SCRIPT { - public SCRIPT(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public SCRIPT $type(String value) { - addAttr("type", value); - return this; - } - - @Override - public SCRIPT $src(String value) { - addAttr("src", value); - return this; - } - - @Override - public SCRIPT $charset(String value) { - addAttr("charset", value); - return this; - } - - @Override - public SCRIPT $defer(String value) { - addAttr("defer", value); - return this; - } - - @Override - public SCRIPT _(Object... lines) { - _p(false, lines); - return this; - } - } - - public class STYLE extends EImp implements HamletSpec.STYLE { - public STYLE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public STYLE $type(String value) { - addAttr("type", value); - return this; - } - - @Override - public STYLE $media(EnumSet value) { - addMediaAttr("media", value); - return this; - } - - @Override - public STYLE $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public STYLE $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public STYLE $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public STYLE _(Object... 
lines) { - _p(false, lines); - return this; - } - } - - public class META extends EImp implements HamletSpec.META { - public META(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public META $http_equiv(String value) { - addAttr("http-equiv", value); - return this; - } - - @Override - public META $name(String value) { - addAttr("name", value); - return this; - } - - @Override - public META $content(String value) { - addAttr("content", value); - return this; - } - - @Override - public META $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public META $dir(Dir value) { - addAttr("dir", value); - return this; - } - } - - public class BASE extends EImp implements HamletSpec.BASE { - public BASE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public BASE $href(String value) { - addAttr("href", value); - return this; - } - } - - public class TITLE extends EImp implements HamletSpec.TITLE { - public TITLE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TITLE $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TITLE $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TITLE _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public TITLE _r(Object... lines) { - _p(false, lines); - return this; - } - } - - public class HEAD extends EImp implements HamletSpec.HEAD { - public HEAD(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public HEAD $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public HEAD $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public BASE> base() { - closeAttrs(); - return base_(this, false); - } - - @Override - public HEAD base(String href) { - return base().$href(href)._(); - } - - @Override - public TITLE> title() { - closeAttrs(); - return title_(this, false); - } - - @Override - public HEAD title(String cdata) { - return title()._(cdata)._(); - } - - @Override - public STYLE> style() { - closeAttrs(); - return style_(this, false); - } - - @Override - public HEAD style(Object... 
lines) { - return style().$type("text/css")._(lines)._(); - } - - @Override - public LINK> link() { - closeAttrs(); - return link_(this, false); - } - - @Override - public HEAD link(String href) { - return setLinkHref(link(), href)._(); - } - - @Override - public META> meta() { - closeAttrs(); - return meta_(this, false); - } - - @Override - public HEAD meta(String name, String content) { - return meta().$name(name).$content(content)._(); - } - - @Override - public HEAD meta_http(String header, String content) { - return meta().$http_equiv(header).$content(content)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, false); - } - - @Override - public HEAD script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - } - - private BASE base_(T e, boolean inline) { - return new BASE("base", e, opt(false, inline, false)); } - - private TITLE title_(T e, boolean inline) { - return new TITLE("title", e, opt(true, inline, false)); } - - public class TD extends EImp implements HamletSpec.TD { - public TD(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TD $headers(String value) { - addAttr("headers", value); - return this; - } - - @Override - public TD $scope(Scope value) { - addAttr("scope", value); - return this; - } - - @Override - public TD $rowspan(int value) { - addAttr("rowspan", value); - return this; - } - - @Override - public TD $colspan(int value) { - addAttr("colspan", value); - return this; - } - - @Override - public TD $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TD $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TD $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TD $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TD $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TD $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TD $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TD $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TD $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public TD $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TD $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TD $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TD $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TD $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public TD $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TD $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public TD address(String cdata) { - return 
address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public TD _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public TD hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public TD h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public TD h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public TD h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public TD h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public TD h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public TD h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public TD h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public TD h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public TD h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public TD h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public TD h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public TD h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - 
@Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public TD _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public TD _r(Object... lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public TD b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public TD b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public TD i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public TD i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public TD small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public TD small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public TD em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public TD em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public TD strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public TD strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public TD dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public TD dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public TD code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public TD code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public TD samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public TD samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public TD kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public TD kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public TD var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public TD var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public TD cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public TD cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public TD abbr(String cdata) { - return abbr()._(cdata)._(); - 
} - - @Override - public TD abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public TD a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public TD a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public TD img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public TD sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public TD sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public TD sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public TD sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public TD q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public TD q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public TD br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public TD bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public TD span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public TD span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public TD script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public TD ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public TD del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public TD label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override 
- public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public TD textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public TD button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class TH extends EImp implements HamletSpec.TH { - public TH(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TH $headers(String value) { - addAttr("headers", value); - return this; - } - - @Override - public TH $scope(Scope value) { - addAttr("scope", value); - return this; - } - - @Override - public TH $rowspan(int value) { - addAttr("rowspan", value); - return this; - } - - @Override - public TH $colspan(int value) { - addAttr("colspan", value); - return this; - } - - @Override - public TH $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TH $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TH $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TH $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TH $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TH $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TH $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TH $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TH $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public TH $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TH $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TH $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TH $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TH $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public TH $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TH $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public TH address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public TH _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, 
false); - } - - @Override - public TH hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public TH h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public TH h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public TH h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public TH h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public TH h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public TH h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public TH h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public TH h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public TH h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public TH h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public TH h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public TH h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public TH _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public TH _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public TH b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public TH b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public TH i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public TH i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public TH small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public TH small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public TH em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public TH em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public TH strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public TH strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public TH dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public TH dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public TH code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public TH code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public TH samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public TH samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public TH kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public TH kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public TH var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public TH var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public TH cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public TH cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public TH abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public TH abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> 
a(String selector) { - return setSelector(a(), selector); - } - - @Override - public TH a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public TH a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public TH img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public TH sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public TH sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public TH sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public TH sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public TH q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public TH q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public TH br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public TH bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public TH span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public TH span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public TH script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public TH ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public TH del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public TH label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public 
TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public TH textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public TH button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class TR extends EImp implements HamletSpec.TR { - public TR(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TR $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TR $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TR $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TR $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TR $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TR $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TR $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TR $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TR $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public TR $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TR $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TR $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TR $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TR $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public TR $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TR $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TH> th() { - closeAttrs(); - return th_(this, false); - } - - @Override - public TR th(String cdata) { - return th()._(cdata)._(); - } - - @Override - public TR th(String selector, String cdata) { - return setSelector(th(), selector)._(cdata)._(); - } - - public TR th(String selector, String title, String cdata) { - return setSelector(th(), selector).$title(title)._(cdata)._(); - } - - @Override - public TD> td() { - closeAttrs(); - return td_(this, false); - } - - @Override - public TR td(String cdata) { - return td()._(cdata)._(); - } - - @Override - public TR td(String selector, String cdata) { - return setSelector(td(), selector)._(cdata)._(); - } - } - - private TH th_(T e, boolean inline) { - return new TH("th", e, opt(true, inline, false)); } - - private TD td_(T e, boolean inline) { - return new TD("td", e, opt(true, inline, false)); } - - public class COL extends EImp implements HamletSpec.COL { - public COL(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public COL $span(int value) { - addAttr("span", value); - return this; - } - - @Override - public COL $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public COL $class(String value) { - addAttr("class", value); - 
return this; - } - - @Override - public COL $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public COL $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public COL $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public COL $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public COL $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public COL $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public COL $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public COL $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public COL $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public COL $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public COL $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public COL $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public COL $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public COL $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - } - - public class COLGROUP extends EImp implements HamletSpec.COLGROUP { - public COLGROUP(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public COLGROUP $span(int value) { - addAttr("span", value); - return this; - } - - @Override - public COLGROUP $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public COLGROUP $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public COLGROUP $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public COLGROUP $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public COLGROUP $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public COLGROUP $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public COLGROUP $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public COLGROUP $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public COLGROUP $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public COLGROUP $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public COLGROUP $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public COLGROUP $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public COLGROUP $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public COLGROUP $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public COLGROUP $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public COLGROUP $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public COL> col() { - closeAttrs(); - return col_(this, false); - } - - @Override - public COLGROUP col(String selector) { - return 
setSelector(col(), selector)._(); - } - } - - public class TBODY extends EImp implements HamletSpec.TBODY { - public TBODY(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TBODY $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TBODY $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TBODY $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TBODY $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TBODY $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TBODY $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TBODY $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TBODY $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TBODY $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public TBODY $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TBODY $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TBODY $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TBODY $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TBODY $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public TBODY $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TBODY $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TR> tr() { - closeAttrs(); - return tr_(this, false); - } - - @Override - public TR> tr(String selector) { - return setSelector(tr(), selector); - } - } - - public class TFOOT extends EImp implements HamletSpec.TFOOT { - public TFOOT(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TFOOT $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TFOOT $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TFOOT $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TFOOT $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TFOOT $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TFOOT $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TFOOT $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TFOOT $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TFOOT $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public TFOOT $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TFOOT $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TFOOT $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TFOOT $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TFOOT $onkeypress(String value) { - 
addAttr("onkeypress", value); - return this; - } - - @Override - public TFOOT $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TFOOT $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TR> tr() { - closeAttrs(); - return tr_(this, false); - } - - @Override - public TR> tr(String selector) { - return setSelector(tr(), selector); - } - } - - public class THEAD extends EImp implements HamletSpec.THEAD { - public THEAD(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public THEAD $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public THEAD $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public THEAD $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public THEAD $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public THEAD $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public THEAD $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public THEAD $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public THEAD $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public THEAD $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public THEAD $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public THEAD $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public THEAD $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public THEAD $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public THEAD $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public THEAD $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public THEAD $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TR> tr() { - closeAttrs(); - return tr_(this, false); - } - - @Override - public TR> tr(String selector) { - return setSelector(tr(), selector); - } - } - - public class CAPTION extends EImp implements HamletSpec.CAPTION { - public CAPTION(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public CAPTION $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public CAPTION $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public CAPTION $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public CAPTION $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public CAPTION $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public CAPTION $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public CAPTION $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public CAPTION $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public CAPTION $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public CAPTION $onmouseup(String value) { - 
addAttr("onmouseup", value); - return this; - } - - @Override - public CAPTION $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public CAPTION $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public CAPTION $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public CAPTION $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public CAPTION $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public CAPTION $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public CAPTION _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public CAPTION _r(Object... lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public CAPTION b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public CAPTION b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public CAPTION i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public CAPTION i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public CAPTION small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public CAPTION small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public CAPTION em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public CAPTION em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public CAPTION strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public CAPTION strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public CAPTION dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public CAPTION dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public CAPTION code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public CAPTION code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public CAPTION samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public CAPTION samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public CAPTION kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public CAPTION kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { 
- closeAttrs(); - return var_(this, true); - } - - @Override - public CAPTION var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public CAPTION var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public CAPTION cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public CAPTION cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public CAPTION abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public CAPTION abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public CAPTION a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public CAPTION a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public CAPTION img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public CAPTION sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public CAPTION sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public CAPTION sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public CAPTION sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public CAPTION q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public CAPTION q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public CAPTION br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public CAPTION bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public CAPTION span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public CAPTION span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public CAPTION script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - 
public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public CAPTION ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public CAPTION del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public CAPTION label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public CAPTION textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public CAPTION button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class TABLE extends EImp implements HamletSpec.TABLE { - public TABLE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TABLE $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TABLE $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TABLE $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TABLE $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TABLE $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TABLE $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TABLE $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TABLE $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TABLE $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public TABLE $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TABLE $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TABLE $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TABLE $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TABLE $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public TABLE $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TABLE $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE caption(String cdata) { - return caption()._(cdata)._(); - } - - @Override - public CAPTION> caption() { - closeAttrs(); - return caption_(this, false); - 
} - - @Override - public COLGROUP> colgroup() { - closeAttrs(); - return colgroup_(this, false); - } - - @Override - public THEAD> thead(String selector) { - return setSelector(thead(), selector); - } - - @Override - public THEAD> thead() { - closeAttrs(); - return thead_(this, false); - } - - @Override - public TFOOT> tfoot() { - closeAttrs(); - return tfoot_(this, false); - } - - @Override - public TFOOT> tfoot(String selector) { - return setSelector(tfoot(), selector); - } - - @Override - public TBODY> tbody() { - closeAttrs(); - return tbody_(this, false); - } - - @Override - public TBODY> tbody(String selector) { - return setSelector(tbody(), selector); - } - - @Override - public TR> tr() { - closeAttrs(); - return tr_(this, false); - } - - @Override - public TR> tr(String selector) { - return setSelector(tr(), selector); - } - - @Override - public COL> col() { - closeAttrs(); - return col_(this, false); - } - - @Override - public TABLE col(String selector) { - return setSelector(col(), selector)._(); - } - } - - private CAPTION caption_(T e, boolean inline) { - return new CAPTION("caption", e, opt(true, inline, false)); } - - private COLGROUP colgroup_(T e, boolean inline) { - return new COLGROUP("colgroup", e, opt(false, inline, false)); } - - private THEAD thead_(T e, boolean inline) { - return new THEAD("thead", e, opt(true, inline, false)); } - - private TFOOT tfoot_(T e, boolean inline) { - return new TFOOT("tfoot", e, opt(true, inline, false)); } - - private TBODY tbody_(T e, boolean inline) { - return new TBODY("tbody", e, opt(true, inline, false)); } - - private COL col_(T e, boolean inline) { - return new COL("col", e, opt(false, inline, false)); } - - private TR tr_(T e, boolean inline) { - return new TR("tr", e, opt(true, inline, false)); } - - public class BUTTON extends EImp implements HamletSpec.BUTTON { - public BUTTON(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public BUTTON $type(ButtonType value) { - addAttr("type", value); - return this; - } - - @Override - public BUTTON $name(String value) { - addAttr("name", value); - return this; - } - - @Override - public BUTTON $value(String value) { - addAttr("value", value); - return this; - } - - @Override - public BUTTON $disabled() { - addAttr("disabled", null); - return this; - } - - @Override - public BUTTON $tabindex(int value) { - addAttr("tabindex", value); - return this; - } - - @Override - public BUTTON $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public BUTTON $onfocus(String value) { - addAttr("onfocus", value); - return this; - } - - @Override - public BUTTON $onblur(String value) { - addAttr("onblur", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public BUTTON address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public BUTTON _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public BUTTON hr(String selector) { - return setSelector(hr(), selector)._(); - } - - 
@Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BUTTON h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public BUTTON h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public BUTTON h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public BUTTON h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public BUTTON h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public BUTTON h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public BUTTON h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public BUTTON h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public BUTTON h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public BUTTON h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public BUTTON h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public BUTTON h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public BUTTON _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public BUTTON _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public BUTTON b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public BUTTON b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public BUTTON i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public BUTTON i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public BUTTON small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public BUTTON small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public BUTTON em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public BUTTON em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public BUTTON strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public BUTTON strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public BUTTON dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public BUTTON dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public BUTTON code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public BUTTON code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public BUTTON samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public BUTTON samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public BUTTON kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public BUTTON kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public BUTTON var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public BUTTON var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public BUTTON cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public BUTTON cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public BUTTON abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public BUTTON abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - 
@Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public BUTTON q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public BUTTON q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public BUTTON br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public BUTTON bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public BUTTON span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public BUTTON span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public BUTTON script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public BUTTON ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public BUTTON del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public BUTTON img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public BUTTON sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public BUTTON sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public BUTTON sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public BUTTON sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public BUTTON $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public BUTTON $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public BUTTON $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public BUTTON $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public BUTTON $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public BUTTON $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public BUTTON $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public BUTTON $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public BUTTON $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public BUTTON $onmouseup(String value) { - addAttr("onmouseup", 
value); - return this; - } - - @Override - public BUTTON $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public BUTTON $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public BUTTON $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public BUTTON $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public BUTTON $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public BUTTON $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - } - - public class LEGEND extends EImp implements HamletSpec.LEGEND { - public LEGEND(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public LEGEND $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public LEGEND $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public LEGEND $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public LEGEND $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public LEGEND $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public LEGEND $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public LEGEND $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public LEGEND $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public LEGEND $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public LEGEND $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public LEGEND $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public LEGEND $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public LEGEND $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public LEGEND $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public LEGEND $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public LEGEND $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public LEGEND $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public LEGEND _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public LEGEND _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public LEGEND b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public LEGEND b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public LEGEND i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public LEGEND i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public LEGEND small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public LEGEND small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public LEGEND em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public LEGEND em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public LEGEND strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public LEGEND strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public LEGEND dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public LEGEND dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public LEGEND code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public LEGEND code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public LEGEND samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public LEGEND samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public LEGEND kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public LEGEND kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public LEGEND var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public LEGEND var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public LEGEND cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public LEGEND cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public LEGEND abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public LEGEND abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - 
@Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public LEGEND a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public LEGEND a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public LEGEND img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public LEGEND sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public LEGEND sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public LEGEND sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public LEGEND sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public LEGEND q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public LEGEND q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public LEGEND br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public LEGEND bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public LEGEND span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public LEGEND span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public LEGEND script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public LEGEND ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public LEGEND del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public LEGEND label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return 
setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public LEGEND textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public LEGEND button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class FIELDSET extends EImp implements HamletSpec.FIELDSET { - public FIELDSET(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public FIELDSET $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public FIELDSET $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public FIELDSET $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public FIELDSET $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public FIELDSET $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public FIELDSET $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public FIELDSET $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public FIELDSET $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public FIELDSET $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public FIELDSET $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public FIELDSET $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public FIELDSET $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public FIELDSET $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public FIELDSET $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public FIELDSET $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public FIELDSET $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public LEGEND> legend() { - closeAttrs(); - return legend_(this, false); - } - - @Override - public FIELDSET legend(String cdata) { - return legend()._(cdata)._(); - } - - @Override - public FIELDSET _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public FIELDSET _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public FIELDSET address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public FIELDSET _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public FIELDSET hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public FIELDSET h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public FIELDSET h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public FIELDSET h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public FIELDSET h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public FIELDSET h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public FIELDSET h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public FIELDSET h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public FIELDSET h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public FIELDSET h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public FIELDSET h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public FIELDSET h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public FIELDSET h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - 
- @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public FIELDSET b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public FIELDSET b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public FIELDSET i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public FIELDSET i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public FIELDSET small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public FIELDSET small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public FIELDSET em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public FIELDSET em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public FIELDSET strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public FIELDSET strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public FIELDSET dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public FIELDSET dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public FIELDSET code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public FIELDSET code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public FIELDSET samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public FIELDSET samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public FIELDSET kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public FIELDSET kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public FIELDSET var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public FIELDSET var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public FIELDSET 
cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public FIELDSET cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public FIELDSET abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public FIELDSET abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public FIELDSET a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public FIELDSET a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public FIELDSET img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public FIELDSET sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public FIELDSET sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public FIELDSET sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public FIELDSET sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public FIELDSET q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public FIELDSET q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public FIELDSET br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public FIELDSET bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public FIELDSET span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public FIELDSET span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public FIELDSET script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public FIELDSET ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public FIELDSET del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() 
{ - closeAttrs(); - return label_(this, true); - } - - @Override - public FIELDSET label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public FIELDSET textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public FIELDSET button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - private LEGEND legend_(T e, boolean inline) { - return new LEGEND("legend", e, opt(true, inline, false)); } - - public class TEXTAREA extends EImp implements HamletSpec.TEXTAREA { - public TEXTAREA(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public TEXTAREA $name(String value) { - addAttr("name", value); - return this; - } - - @Override - public TEXTAREA $disabled() { - addAttr("disabled", null); - return this; - } - - @Override - public TEXTAREA $tabindex(int value) { - addAttr("tabindex", value); - return this; - } - - @Override - public TEXTAREA $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public TEXTAREA $onfocus(String value) { - addAttr("onfocus", value); - return this; - } - - @Override - public TEXTAREA $onblur(String value) { - addAttr("onblur", value); - return this; - } - - @Override - public TEXTAREA $rows(int value) { - addAttr("rows", value); - return this; - } - - @Override - public TEXTAREA $cols(int value) { - addAttr("cols", value); - return this; - } - - @Override - public TEXTAREA $readonly() { - addAttr("readonly", null); - return this; - } - - @Override - public TEXTAREA $onselect(String value) { - addAttr("onselect", value); - return this; - } - - @Override - public TEXTAREA $onchange(String value) { - addAttr("onchange", value); - return this; - } - - @Override - public TEXTAREA $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public TEXTAREA $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public TEXTAREA $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public TEXTAREA $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public TEXTAREA $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public TEXTAREA $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public TEXTAREA $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public TEXTAREA $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public TEXTAREA $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public 
TEXTAREA $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public TEXTAREA $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public TEXTAREA $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public TEXTAREA $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public TEXTAREA $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public TEXTAREA $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public TEXTAREA $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TEXTAREA _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public TEXTAREA _r(Object... lines) { - _p(false, lines); - return this; - } - } - - public class OPTION extends EImp implements HamletSpec.OPTION { - public OPTION(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public OPTION $value(String value) { - addAttr("value", value); - return this; - } - - @Override - public OPTION $disabled() { - addAttr("disabled", null); - return this; - } - - @Override - public OPTION $selected() { - addAttr("selected", null); - return this; - } - - @Override - public OPTION $label(String value) { - addAttr("label", value); - return this; - } - - @Override - public OPTION $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public OPTION $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public OPTION $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public OPTION $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public OPTION $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public OPTION $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public OPTION $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public OPTION $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public OPTION $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public OPTION $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public OPTION $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public OPTION $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public OPTION $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public OPTION $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public OPTION $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public OPTION $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public OPTION _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public OPTION _r(Object... 
-    lines) {
-      _p(false, lines);
-      return this;
-    }
-  }
-
-  public class OPTGROUP extends EImp implements HamletSpec.OPTGROUP {
-    public OPTGROUP(String name, T parent, EnumSet opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public OPTGROUP $disabled() {
-      addAttr("disabled", null);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $label(String value) {
-      addAttr("label", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $class(String value) {
-      addAttr("class", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $title(String value) {
-      addAttr("title", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $style(String value) {
-      addAttr("style", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $lang(String value) {
-      addAttr("lang", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $dir(Dir value) {
-      addAttr("dir", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onclick(String value) {
-      addAttr("onclick", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $ondblclick(String value) {
-      addAttr("ondblclick", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onmousedown(String value) {
-      addAttr("onmousedown", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onmouseup(String value) {
-      addAttr("onmouseup", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onmouseover(String value) {
-      addAttr("onmouseover", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onmousemove(String value) {
-      addAttr("onmousemove", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onmouseout(String value) {
-      addAttr("onmouseout", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onkeypress(String value) {
-      addAttr("onkeypress", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onkeydown(String value) {
-      addAttr("onkeydown", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP $onkeyup(String value) {
-      addAttr("onkeyup", value);
-      return this;
-    }
-
-    @Override
-    public OPTION> option() {
-      closeAttrs();
-      return option_(this, false);
-    }
-
-    @Override
-    public OPTGROUP option(String cdata) {
-      return option()._(cdata)._();
-    }
-  }
-
-  private OPTGROUP optgroup_(T e, boolean inline) {
-    return new OPTGROUP("optgroup", e, opt(true, inline, false)); }
-
-  public class SELECT extends EImp implements HamletSpec.SELECT {
-    public SELECT(String name, T parent, EnumSet opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public SELECT $name(String value) {
-      addAttr("name", value);
-      return this;
-    }
-
-    @Override
-    public SELECT $disabled() {
-      addAttr("disabled", null);
-      return this;
-    }
-
-    @Override
-    public SELECT $tabindex(int value) {
-      addAttr("tabindex", value);
-      return this;
-    }
-
-    @Override
-    public SELECT $onfocus(String value) {
-      addAttr("onfocus", value);
-      return this;
-    }
-
-    @Override
-    public SELECT $onblur(String value) {
-      addAttr("onblur", value);
-      return this;
-    }
-
-    @Override
-    public SELECT $onchange(String value) {
-      addAttr("onchange", value);
-      return this;
-    }
-
-    @Override
-    public OPTGROUP> optgroup() {
-      closeAttrs();
-      return optgroup_(this, false);
-    }
-
-    @Override
-    public SELECT $size(int value) {
-      addAttr("size", value);
-      return this;
-    }
-
-    @Override
-    public SELECT $multiple() {
-      addAttr("multiple", null);
-      return this;
-    }
-
-    @Override
-    public SELECT $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
@Override - public SELECT $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public SELECT $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public SELECT $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public SELECT $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public SELECT $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public SELECT $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public SELECT $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public SELECT $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public SELECT $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public SELECT $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public SELECT $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public SELECT $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public SELECT $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public SELECT $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public SELECT $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public OPTION> option() { - closeAttrs(); - return option_(this, false); - } - - @Override - public SELECT option(String cdata) { - return option()._(cdata)._(); - } - } - - private OPTION option_(T e, boolean inline) { - return new OPTION("option", e, opt(false, inline, false)); } - - public class INPUT extends EImp implements HamletSpec.INPUT { - public INPUT(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public INPUT $type(InputType value) { - addAttr("type", value); - return this; - } - - @Override - public INPUT $src(String value) { - addAttr("src", value); - return this; - } - - @Override - public INPUT $name(String value) { - addAttr("name", value); - return this; - } - - @Override - public INPUT $value(String value) { - addAttr("value", value); - return this; - } - - @Override - public INPUT $disabled() { - addAttr("disabled", null); - return this; - } - - @Override - public INPUT $tabindex(int value) { - addAttr("tabindex", value); - return this; - } - - @Override - public INPUT $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public INPUT $onfocus(String value) { - addAttr("onfocus", value); - return this; - } - - @Override - public INPUT $onblur(String value) { - addAttr("onblur", value); - return this; - } - - @Override - public INPUT $readonly() { - addAttr("readonly", null); - return this; - } - - @Override - public INPUT $onselect(String value) { - addAttr("onselect", value); - return this; - } - - @Override - public INPUT $onchange(String value) { - addAttr("onchange", value); - return this; - } - - @Override - public INPUT $size(String value) { - addAttr("size", value); - return this; - } - - @Override - public INPUT $checked() { - addAttr("checked", null); - return this; - } - - @Override - public INPUT $maxlength(int value) { - addAttr("maxlength", value); - return this; - } - - @Override - public INPUT $alt(String value) { - addAttr("alt", 
value); - return this; - } - - @Override - public INPUT $ismap() { - addAttr("ismap", null); - return this; - } - - @Override - public INPUT $accept(String value) { - addAttr("accept", value); - return this; - } - - @Override - public INPUT $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public INPUT $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public INPUT $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public INPUT $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public INPUT $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public INPUT $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public INPUT $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public INPUT $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public INPUT $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public INPUT $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public INPUT $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public INPUT $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public INPUT $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public INPUT $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public INPUT $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public INPUT $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - } - - public class LABEL extends EImp implements HamletSpec.LABEL { - public LABEL(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public LABEL $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public LABEL $onfocus(String value) { - addAttr("onfocus", value); - return this; - } - - @Override - public LABEL $onblur(String value) { - addAttr("onblur", value); - return this; - } - - @Override - public LABEL $for(String value) { - addAttr("for", value); - return this; - } - - @Override - public LABEL $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public LABEL $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public LABEL $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public LABEL $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public LABEL $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public LABEL $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public LABEL $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public LABEL $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public LABEL $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public LABEL $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public LABEL $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public LABEL 
$onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public LABEL $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public LABEL $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public LABEL $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public LABEL $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public LABEL _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public LABEL _r(Object... lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public LABEL b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public LABEL b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public LABEL i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public LABEL i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public LABEL small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public LABEL small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public LABEL em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public LABEL em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public LABEL strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public LABEL strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public LABEL dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public LABEL dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public LABEL code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public LABEL code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public LABEL samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public LABEL samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public LABEL kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public LABEL kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public LABEL var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public LABEL var(String selector, String cdata) { - return setSelector(var(), 
selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public LABEL cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public LABEL cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public LABEL abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public LABEL abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public LABEL a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public LABEL a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public LABEL img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public LABEL sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public LABEL sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public LABEL sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public LABEL sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public LABEL q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public LABEL q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public LABEL br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public LABEL bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public LABEL span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public LABEL span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public LABEL script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public LABEL ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public LABEL 
[Removed-lines hunk, abridged: this block of the patch deletes the inner element classes of the Hamlet fluent HTML builder (old "_"-method style), namely FORM, LI, UL, OL, DD, DT, DL, DEL and INS, together with the private factory helpers li_(), dt_() and dd_(). Every class follows the same generated pattern: a (name, parent, opts) constructor; the shared attribute setters $id, $class, $title, $style, $lang, $dir plus the $onclick, $ondblclick, $onmouse* and $onkey* event-handler setters, each calling addAttr(...) and returning this; block-level child factories (table(), address(), p(), div(), blockquote()/bq(), hr(), dl(), h1()..h6(), ul(), ol(), pre(), form(), fieldset()); inline factories (b(), i(), small(), em(), strong(), dfn(), code(), samp(), kbd(), var(), cite(), abbr(), a(), img(), object(), sub(), sup(), map(), q(), br(), bdo(), span(), script(), ins(), del()); and the form-control factories label(), input(), select(), textarea(), button(). Each factory calls closeAttrs() and returns the typed child builder, while the selector and cdata convenience overloads delegate through setSelector(...) and _(cdata) and return the current element.]
@Override - public INS kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public INS kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public INS var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public INS var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public INS cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public INS cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public INS abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public INS abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public INS a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public INS a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public INS img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public INS sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public INS sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public INS sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public INS sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public INS q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public INS q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public INS br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public INS bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public INS span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public INS span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - 
closeAttrs(); - return script_(this, true); - } - - @Override - public INS script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public INS ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public INS del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public INS label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public INS textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public INS button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class BLOCKQUOTE extends EImp implements HamletSpec.BLOCKQUOTE { - public BLOCKQUOTE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public BLOCKQUOTE $cite(String value) { - addAttr("cite", value); - return this; - } - - @Override - public BLOCKQUOTE $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public BLOCKQUOTE $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public BLOCKQUOTE $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public BLOCKQUOTE $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public BLOCKQUOTE $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public BLOCKQUOTE $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public BLOCKQUOTE $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public BLOCKQUOTE $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public BLOCKQUOTE $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public BLOCKQUOTE $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public BLOCKQUOTE $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public BLOCKQUOTE $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public BLOCKQUOTE $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public BLOCKQUOTE $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public BLOCKQUOTE $onkeydown(String value) { - 
addAttr("onkeydown", value); - return this; - } - - @Override - public BLOCKQUOTE $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public BLOCKQUOTE address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public BLOCKQUOTE _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public BLOCKQUOTE hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public BLOCKQUOTE h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public BLOCKQUOTE h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public BLOCKQUOTE h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public BLOCKQUOTE h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public BLOCKQUOTE h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public BLOCKQUOTE h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public BLOCKQUOTE h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public BLOCKQUOTE h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public BLOCKQUOTE h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public BLOCKQUOTE h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public BLOCKQUOTE h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String 
selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public BLOCKQUOTE script(String src) { - return setScriptSrc(script(), src)._(); - } - } - - public class Q extends EImp implements HamletSpec.Q { - public Q(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public Q $cite(String value) { - addAttr("cite", value); - return this; - } - - @Override - public Q $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public Q $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public Q $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public Q $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public Q $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public Q $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public Q $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public Q $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public Q $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public Q $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public Q $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public Q $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public Q $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public Q $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public Q $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public Q $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public Q _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public Q _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public Q b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public Q b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public Q i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public Q i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public Q small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public Q small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public Q em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public Q em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public Q strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public Q strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public Q dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public Q dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public Q code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public Q code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public Q samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public Q samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public Q kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public Q kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public Q var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public Q var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public Q cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public Q cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public Q abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public Q abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - 
return setSelector(a(), selector); - } - - @Override - public Q a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public Q a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public Q img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public Q sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public Q sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public Q sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public Q sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public Q q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public Q q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public Q br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public Q bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public Q span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public Q span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public Q script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public Q ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public Q del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public Q label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - 
return textarea_(this, true); - } - - @Override - public Q textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public Q button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class PRE extends EImp implements HamletSpec.PRE { - public PRE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public PRE $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public PRE $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public PRE $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public PRE $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public PRE $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public PRE $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public PRE $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public PRE $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public PRE $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public PRE $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public PRE $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public PRE $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public PRE $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public PRE $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public PRE $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public PRE $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public PRE _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public PRE _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public PRE b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public PRE b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public PRE i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public PRE i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public PRE em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public PRE em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public PRE strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public PRE strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public PRE dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public PRE dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public PRE code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public PRE code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public PRE samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public PRE samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public PRE kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public PRE kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public PRE var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public PRE var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public PRE cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public PRE cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public PRE abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public PRE abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public PRE a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public PRE a(String selector, String href, String anchorText) { - return 
setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public PRE q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public PRE q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public PRE br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public PRE bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public PRE span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public PRE span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public PRE script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public PRE ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public PRE del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public PRE label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public PRE textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public PRE button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class H6 extends EImp implements HamletSpec.H6 { - public H6(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public H6 $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public H6 $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public H6 $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public H6 $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public H6 $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public H6 
$dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public H6 $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public H6 $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public H6 $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public H6 $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public H6 $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public H6 $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public H6 $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public H6 $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public H6 $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public H6 $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public H6 _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public H6 _r(Object... lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public H6 b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public H6 b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public H6 i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public H6 i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public H6 small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public H6 small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public H6 em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public H6 em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public H6 strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public H6 strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public H6 dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public H6 dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public H6 code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public H6 code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public H6 samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public H6 samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - 
public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public H6 kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public H6 kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public H6 var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public H6 var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public H6 cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public H6 cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public H6 abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public H6 abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public H6 a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public H6 a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public H6 img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public H6 sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public H6 sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public H6 sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public H6 sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public H6 q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public H6 q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public H6 br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public H6 bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public H6 span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public H6 span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - 
- @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public H6 script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public H6 ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public H6 del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public H6 label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public H6 textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public H6 button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class H5 extends EImp implements HamletSpec.H5 { - public H5(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public H5 $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public H5 $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public H5 $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public H5 $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public H5 $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public H5 $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public H5 $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public H5 $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public H5 $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public H5 $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public H5 $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public H5 $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public H5 $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public H5 $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public H5 $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public H5 $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public H5 _(Object... 
lines) { - _p(true, lines); - return this; - } - - @Override - public H5 _r(Object... lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public H5 b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public H5 b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public H5 i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public H5 i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public H5 small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public H5 small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public H5 em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public H5 em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public H5 strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public H5 strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public H5 dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public H5 dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public H5 code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public H5 code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public H5 samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public H5 samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public H5 kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public H5 kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public H5 var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public H5 var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public H5 cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public H5 cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public H5 abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public H5 abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - 
public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public H5 a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public H5 a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public H5 img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public H5 sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public H5 sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public H5 sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public H5 sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public H5 q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public H5 q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public H5 br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public H5 bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public H5 span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public H5 span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public H5 script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public H5 ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public H5 del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public H5 label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String 
selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public H5 textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public H5 button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class H4 extends EImp implements HamletSpec.H4 { - public H4(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public H4 $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public H4 $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public H4 $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public H4 $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public H4 $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public H4 $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public H4 $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public H4 $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public H4 $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public H4 $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public H4 $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public H4 $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public H4 $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public H4 $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public H4 $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public H4 $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public H4 _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public H4 _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public H4 b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public H4 b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public H4 i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public H4 i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public H4 small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public H4 small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public H4 em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public H4 em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public H4 strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public H4 strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public H4 dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public H4 dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public H4 code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public H4 code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public H4 samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public H4 samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public H4 kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public H4 kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public H4 var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public H4 var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public H4 cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public H4 cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public H4 abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public H4 abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> 
a(String selector) { - return setSelector(a(), selector); - } - - @Override - public H4 a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public H4 a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public H4 img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public H4 sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public H4 sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public H4 sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public H4 sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public H4 q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public H4 q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public H4 br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public H4 bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public H4 span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public H4 span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public H4 script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public H4 ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public H4 del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public H4 label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public 
TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public H4 textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public H4 button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class H3 extends EImp implements HamletSpec.H3 { - public H3(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public H3 $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public H3 $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public H3 $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public H3 $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public H3 $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public H3 $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public H3 $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public H3 $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public H3 $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public H3 $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public H3 $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public H3 $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public H3 $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public H3 $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public H3 $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public H3 $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public H3 _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public H3 _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public H3 b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public H3 b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public H3 i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public H3 i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public H3 small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public H3 small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public H3 em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public H3 em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public H3 strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public H3 strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public H3 dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public H3 dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public H3 code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public H3 code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public H3 samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public H3 samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public H3 kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public H3 kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public H3 var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public H3 var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public H3 cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public H3 cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public H3 abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public H3 abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> 
a(String selector) { - return setSelector(a(), selector); - } - - @Override - public H3 a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public H3 a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public H3 img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public H3 sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public H3 sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public H3 sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public H3 sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public H3 q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public H3 q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public H3 br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public H3 bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public H3 span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public H3 span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public H3 script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public H3 ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public H3 del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public H3 label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public 
TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public H3 textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public H3 button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class H2 extends EImp implements HamletSpec.H2 { - public H2(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public H2 $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public H2 $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public H2 $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public H2 $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public H2 $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public H2 $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public H2 $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public H2 $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public H2 $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public H2 $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public H2 $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public H2 $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public H2 $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public H2 $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public H2 $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public H2 $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public H2 _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public H2 _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public H2 b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public H2 b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public H2 i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public H2 i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public H2 small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public H2 small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public H2 em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public H2 em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public H2 strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public H2 strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public H2 dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public H2 dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public H2 code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public H2 code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public H2 samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public H2 samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public H2 kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public H2 kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public H2 var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public H2 var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public H2 cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public H2 cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public H2 abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public H2 abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> 
a(String selector) { - return setSelector(a(), selector); - } - - @Override - public H2 a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public H2 a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public H2 img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public H2 sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public H2 sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public H2 sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public H2 sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public H2 q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public H2 q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public H2 br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public H2 bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public H2 span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public H2 span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public H2 script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public H2 ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public H2 del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public H2 label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public 
TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public H2 textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public H2 button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class H1 extends EImp implements HamletSpec.H1 { - public H1(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public H1 $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public H1 $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public H1 $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public H1 $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public H1 $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public H1 $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public H1 $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public H1 $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public H1 $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public H1 $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public H1 $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public H1 $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public H1 $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public H1 $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public H1 $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public H1 $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public H1 _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public H1 _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public H1 b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public H1 b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public H1 i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public H1 i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public H1 small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public H1 small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public H1 em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public H1 em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public H1 strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public H1 strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public H1 dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public H1 dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public H1 code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public H1 code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public H1 samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public H1 samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public H1 kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public H1 kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public H1 var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public H1 var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public H1 cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public H1 cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public H1 abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public H1 abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> 
a(String selector) { - return setSelector(a(), selector); - } - - @Override - public H1 a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public H1 a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public H1 img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public H1 sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public H1 sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public H1 sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public H1 sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public H1 q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public H1 q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public H1 br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public H1 bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public H1 span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public H1 span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public H1 script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public H1 ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public H1 del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public H1 label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public 
TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public H1 textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public H1 button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class P extends EImp implements HamletSpec.P { - public P(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public P $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public P $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public P $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public P $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public P $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public P $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public P $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public P $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public P $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public P $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public P $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public P $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public P $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public P $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public P $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public P $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public P _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public P _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public P b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public P b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public P i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public P i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public P small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public P small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public P em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public P em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public P strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public P strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public P dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public P dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public P code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public P code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public P samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public P samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public P kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public P kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public P var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public P var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public P cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public P cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public P abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public P abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - 
return setSelector(a(), selector); - } - - @Override - public P a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public P a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public P img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public P sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public P sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public P sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public P sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public P q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public P q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public P br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public P bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public P span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public P span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public P script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public P ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public P del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public P label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - 
-      return textarea_(this, true);
-    }
-
-    @Override
-    public P<T> textarea(String selector, String cdata) {
-      return setSelector(textarea(), selector)._(cdata)._();
-    }
-
-    @Override
-    public BUTTON<P<T>> button() {
-      closeAttrs();
-      return button_(this, true);
-    }
-
-    @Override
-    public BUTTON<P<T>> button(String selector) {
-      return setSelector(button(), selector);
-    }
-
-    @Override
-    public P<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class HR<T extends _> extends EImp<T> implements HamletSpec.HR {
-    public HR(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public HR<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $class(String value) {
-      addAttr("class", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $title(String value) {
-      addAttr("title", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $style(String value) {
-      addAttr("style", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $lang(String value) {
-      addAttr("lang", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $dir(Dir value) {
-      addAttr("dir", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onclick(String value) {
-      addAttr("onclick", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $ondblclick(String value) {
-      addAttr("ondblclick", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onmousedown(String value) {
-      addAttr("onmousedown", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onmouseup(String value) {
-      addAttr("onmouseup", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onmouseover(String value) {
-      addAttr("onmouseover", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onmousemove(String value) {
-      addAttr("onmousemove", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onmouseout(String value) {
-      addAttr("onmouseout", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onkeypress(String value) {
-      addAttr("onkeypress", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onkeydown(String value) {
-      addAttr("onkeydown", value);
-      return this;
-    }
-
-    @Override
-    public HR<T> $onkeyup(String value) {
-      addAttr("onkeyup", value);
-      return this;
-    }
-  }
-
-  public class PARAM<T extends _> extends EImp<T> implements HamletSpec.PARAM {
-    public PARAM(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public PARAM<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public PARAM<T> $name(String value) {
-      addAttr("name", value);
-      return this;
-    }
-
-    @Override
-    public PARAM<T> $value(String value) {
-      addAttr("value", value);
-      return this;
-    }
-  }
-
-  public class OBJECT<T extends _> extends EImp<T> implements HamletSpec.OBJECT {
-    public OBJECT(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public OBJECT<T> $type(String value) {
-      addAttr("type", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> $name(String value) {
-      addAttr("name", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> $tabindex(int value) {
-      addAttr("tabindex", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> $data(String value) {
-      addAttr("data", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> $height(String value) {
-      addAttr("height", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> $height(int value) {
-      addAttr("height", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> $width(int value) {
-      addAttr("width", value);
-      return this;
-    }
-
-    @Override
-    public OBJECT<T> 
$width(String value) { - addAttr("width", value); - return this; - } - - @Override - public OBJECT $usemap(String value) { - addAttr("usemap", value); - return this; - } - - @Override - public OBJECT $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public OBJECT $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public OBJECT $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public OBJECT $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public OBJECT $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public OBJECT $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public OBJECT $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public OBJECT $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public OBJECT $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public OBJECT $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public OBJECT $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public OBJECT $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public OBJECT $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public OBJECT $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public OBJECT $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public OBJECT $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public PARAM> param() { - closeAttrs(); - return param_(this, false); - } - - @Override - public OBJECT param(String name, String value) { - return param().$name(name).$value(value)._(); - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public OBJECT address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public OBJECT _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public OBJECT hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public OBJECT h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - 
@Override - public OBJECT h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public OBJECT h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public OBJECT h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public OBJECT h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public OBJECT h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public OBJECT h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public OBJECT h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public OBJECT h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public OBJECT h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public OBJECT h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public OBJECT h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public OBJECT _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public OBJECT _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public OBJECT b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public OBJECT b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public OBJECT i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public OBJECT i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public OBJECT small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public OBJECT small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public OBJECT em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public OBJECT em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public OBJECT strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public OBJECT strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public OBJECT dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public OBJECT dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public OBJECT code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public OBJECT code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public OBJECT samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public OBJECT samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public OBJECT kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public OBJECT kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public OBJECT var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public OBJECT var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public OBJECT cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public OBJECT cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public OBJECT abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public OBJECT abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - 
@Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public OBJECT a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public OBJECT a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public OBJECT img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public OBJECT sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public OBJECT sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public OBJECT sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public OBJECT sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public OBJECT q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public OBJECT q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public OBJECT br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public OBJECT bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public OBJECT span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public OBJECT span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public OBJECT script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public OBJECT ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public OBJECT del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public OBJECT label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return 
setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public OBJECT textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public OBJECT button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - private PARAM param_(T e, boolean inline) { - return new PARAM("param", e, opt(false, inline, false)); } - - public class IMG extends EImp implements HamletSpec.IMG { - public IMG(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public IMG $src(String value) { - addAttr("src", value); - return this; - } - - @Override - public IMG $alt(String value) { - addAttr("alt", value); - return this; - } - - @Override - public IMG $ismap() { - addAttr("ismap", null); - return this; - } - - @Override - public IMG $height(String value) { - addAttr("height", value); - return this; - } - - @Override - public IMG $height(int value) { - addAttr("height", value); - return this; - } - - @Override - public IMG $width(int value) { - addAttr("width", value); - return this; - } - - @Override - public IMG $width(String value) { - addAttr("width", value); - return this; - } - - @Override - public IMG $usemap(String value) { - addAttr("usemap", value); - return this; - } - - @Override - public IMG $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public IMG $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public IMG $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public IMG $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public IMG $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public IMG $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public IMG $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public IMG $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public IMG $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public IMG $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public IMG $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public IMG $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public IMG $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public IMG $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public IMG $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public IMG $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - } - - public class LINK extends EImp implements HamletSpec.LINK { - public LINK(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public LINK $rel(EnumSet value) { - addRelAttr("rel", value); - 
return this; - } - - @Override - public LINK $rel(String value) { - addAttr("rel", value); - return this; - } - - @Override - public LINK $href(String value) { - addAttr("href", value); - return this; - } - - @Override - public LINK $type(String value) { - addAttr("type", value); - return this; - } - - @Override - public LINK $media(EnumSet value) { - addMediaAttr("media", value); - return this; - } - - @Override - public LINK $media(String value) { - addAttr("media", value); - return this; - } - - @Override - public LINK $hreflang(String value) { - addAttr("hreflang", value); - return this; - } - - @Override - public LINK $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public LINK $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public LINK $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public LINK $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public LINK $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public LINK $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public LINK $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public LINK $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public LINK $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public LINK $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public LINK $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public LINK $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public LINK $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public LINK $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public LINK $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public LINK $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - } - - public class AREA extends EImp implements HamletSpec.AREA { - public AREA(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public AREA $href(String value) { - addAttr("href", value); - return this; - } - - @Override - public AREA $tabindex(int value) { - addAttr("tabindex", value); - return this; - } - - @Override - public AREA $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public AREA $onfocus(String value) { - addAttr("onfocus", value); - return this; - } - - @Override - public AREA $onblur(String value) { - addAttr("onblur", value); - return this; - } - - @Override - public AREA $alt(String value) { - addAttr("alt", value); - return this; - } - - @Override - public AREA $shape(Shape value) { - addAttr("shape", value); - return this; - } - - @Override - public AREA $coords(String value) { - addAttr("coords", value); - return this; - } - - @Override - public AREA $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public AREA $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public AREA $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public AREA $style(String value) { - addAttr("style", value); - return this; - } 
- - @Override - public AREA $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public AREA $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public AREA $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public AREA $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public AREA $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public AREA $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public AREA $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public AREA $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public AREA $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public AREA $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public AREA $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public AREA $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - } - - private AREA area_(T e, boolean inline) { - return new AREA("area", e, opt(false, inline, false)); } - - public class MAP extends EImp implements HamletSpec.MAP { - public MAP(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public MAP $name(String value) { - addAttr("name", value); - return this; - } - - @Override - public AREA> area() { - closeAttrs(); - return area_(this, false); - } - - @Override - public AREA> area(String selector) { - return setSelector(area(), selector); - } - - @Override - public MAP $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public MAP $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public MAP $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public MAP $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public MAP $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public MAP $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public MAP $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public MAP $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public MAP $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public MAP $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public MAP $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public MAP $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public MAP $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public MAP $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public MAP $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public MAP $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return 
setSelector(table(), selector); - } - - @Override - public MAP address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public MAP _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public MAP hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public MAP h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public MAP h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public MAP h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public MAP h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public MAP h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public MAP h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public MAP h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public MAP h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public MAP h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public MAP h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public MAP h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public MAP h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } 
- - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - } - - public class A extends EImp implements HamletSpec.A { - public A(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public A $rel(EnumSet value) { - addRelAttr("rel", value); - return this; - } - - @Override - public A $rel(String value) { - addAttr("rel", value); - return this; - } - - @Override - public A $href(String value) { - addAttr("href", value); - return this; - } - - @Override - public A $type(String value) { - addAttr("type", value); - return this; - } - - @Override - public A $tabindex(int value) { - addAttr("tabindex", value); - return this; - } - - @Override - public A $accesskey(String value) { - addAttr("accesskey", value); - return this; - } - - @Override - public A $onfocus(String value) { - addAttr("onfocus", value); - return this; - } - - @Override - public A $onblur(String value) { - addAttr("onblur", value); - return this; - } - - @Override - public A $hreflang(String value) { - addAttr("hreflang", value); - return this; - } - - @Override - public A $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public A $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public A $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public A $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public A $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public A $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public A $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public A $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public A $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public A $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public A $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public A $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public A $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public A $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public A $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public A $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public A _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public A _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public A b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public A b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public A i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public A i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public A small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public A small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public A em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public A em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public A strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public A strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public A dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public A dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public A code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public A code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public A samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public A samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public A kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public A kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public A var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public A var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public A cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public A cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public A abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public A abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public A img(String src) { - 
return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public A q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public A q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public A br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public A bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public A span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public A span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public A script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public A ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public A del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public A sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public A sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public A sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public A sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public A label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public A textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public A button(String selector, String cdata) { - 
return setSelector(button(), selector)._(cdata)._(); - } - } - - public class DIV extends EImp implements HamletSpec.DIV { - public DIV(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public DIV $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public DIV $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public DIV $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public DIV $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public DIV $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public DIV $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public DIV $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public DIV $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public DIV $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public DIV $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public DIV $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public DIV $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public DIV $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public DIV $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public DIV $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public DIV $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public DIV address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public DIV _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public DIV hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public DIV h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public DIV h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public DIV h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - 
closeAttrs(); - return h2_(this, false); - } - - @Override - public DIV h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public DIV h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public DIV h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public DIV h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public DIV h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public DIV h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public DIV h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public DIV h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public DIV h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public DIV _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public DIV _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public DIV b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public DIV b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public DIV i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public DIV i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public DIV small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public DIV small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public DIV em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public DIV em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public DIV strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public DIV strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public DIV dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public DIV dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public DIV code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public DIV code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public DIV samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public DIV samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public DIV kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public DIV kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public DIV var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public DIV var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public DIV cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public DIV cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public DIV abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public DIV abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - 
@Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public DIV a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public DIV a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public DIV img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public DIV sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public DIV sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public DIV sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public DIV sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public DIV q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public DIV q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public DIV br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public DIV bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public DIV span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public DIV span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public DIV script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public DIV ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public DIV del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public DIV label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), 
selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public DIV textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public DIV button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class ADDRESS extends EImp implements HamletSpec.ADDRESS { - public ADDRESS(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public ADDRESS $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public ADDRESS $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public ADDRESS $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public ADDRESS $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public ADDRESS $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public ADDRESS $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public ADDRESS $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public ADDRESS $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public ADDRESS $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public ADDRESS $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public ADDRESS $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public ADDRESS $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public ADDRESS $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public ADDRESS $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public ADDRESS $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public ADDRESS $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public ADDRESS _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public ADDRESS _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public ADDRESS b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public ADDRESS b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public ADDRESS i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public ADDRESS i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public ADDRESS small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public ADDRESS small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public ADDRESS em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public ADDRESS em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public ADDRESS strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public ADDRESS strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public ADDRESS dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public ADDRESS dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public ADDRESS code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public ADDRESS code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public ADDRESS samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public ADDRESS samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public ADDRESS kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public ADDRESS kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public ADDRESS var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public ADDRESS var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public ADDRESS cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public ADDRESS cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public ADDRESS abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public ADDRESS abbr(String selector, String cdata) { - return setSelector(abbr(), 
selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public ADDRESS a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public ADDRESS a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public ADDRESS img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public ADDRESS sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public ADDRESS sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public ADDRESS sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public ADDRESS sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public ADDRESS q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public ADDRESS q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public ADDRESS br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public ADDRESS bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public ADDRESS span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public ADDRESS span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public ADDRESS script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public ADDRESS ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public ADDRESS del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public ADDRESS label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public 
SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public ADDRESS textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public ADDRESS button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class BODY extends EImp implements HamletSpec.BODY { - public BODY(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public BODY $onload(String value) { - addAttr("onload", value); - return this; - } - - @Override - public BODY $onunload(String value) { - addAttr("onunload", value); - return this; - } - - @Override - public BODY $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public BODY $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public BODY $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public BODY $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public BODY $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public BODY $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public BODY $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public BODY $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public BODY $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public BODY $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public BODY $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public BODY $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public BODY $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public BODY $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public BODY $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public BODY $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public TABLE> table() { - closeAttrs(); - return table_(this, false); - } - - @Override - public TABLE> table(String selector) { - return setSelector(table(), selector); - } - - @Override - public BODY address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS> address() { - closeAttrs(); - return address_(this, false); - } - - @Override - public P> p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P> p() { - closeAttrs(); - return p_(this, false); - } - - @Override - public BODY _(Class cls) { - _v(cls); - return this; - } - - @Override - public HR> hr() { - closeAttrs(); - return hr_(this, false); - } - - @Override - public BODY hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL> 
dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL> dl() { - closeAttrs(); - return dl_(this, false); - } - - @Override - public DIV> div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV> div() { - closeAttrs(); - return div_(this, false); - } - - @Override - public BLOCKQUOTE> blockquote() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE> bq() { - closeAttrs(); - return blockquote_(this, false); - } - - @Override - public BODY h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1> h1() { - closeAttrs(); - return h1_(this, false); - } - - @Override - public BODY h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public BODY h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2> h2() { - closeAttrs(); - return h2_(this, false); - } - - @Override - public BODY h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3> h3() { - closeAttrs(); - return h3_(this, false); - } - - @Override - public BODY h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public BODY h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4> h4() { - closeAttrs(); - return h4_(this, false); - } - - @Override - public BODY h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public BODY h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5> h5() { - closeAttrs(); - return h5_(this, false); - } - - @Override - public BODY h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public BODY h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6> h6() { - closeAttrs(); - return h6_(this, false); - } - - @Override - public BODY h6(String cdata) { - return h6()._(cdata)._(); - } - - @Override - public BODY h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL> ul() { - closeAttrs(); - return ul_(this, false); - } - - @Override - public UL> ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL> ol() { - closeAttrs(); - return ol_(this, false); - } - - @Override - public OL> ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE> pre() { - closeAttrs(); - return pre_(this, false); - } - - @Override - public PRE> pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM> form() { - closeAttrs(); - return form_(this, false); - } - - @Override - public FORM> form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET> fieldset() { - closeAttrs(); - return fieldset_(this, false); - } - - @Override - public FIELDSET> fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, false); - } - - @Override - public BODY script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, false); - } - - @Override - public BODY ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, false); - } - - 
-    @Override
-    public BODY<T> del(String cdata) {
-      return del()._(cdata)._();
-    }
-  }
-
-  private <T extends _> TABLE<T> table_(T e, boolean inline) {
-    return new TABLE<T>("table", e, opt(true, inline, false)); }
-
-  private <T extends _> ADDRESS<T> address_(T e, boolean inline) {
-    return new ADDRESS<T>("address", e, opt(true, inline, false)); }
-
-  private <T extends _> P<T> p_(T e, boolean inline) {
-    return new P<T>("p", e, opt(false, inline, false)); }
-
-  private <T extends _> HR<T> hr_(T e, boolean inline) {
-    return new HR<T>("hr", e, opt(false, inline, false)); }
-
-  private <T extends _> DL<T> dl_(T e, boolean inline) {
-    return new DL<T>("dl", e, opt(true, inline, false)); }
-
-  private <T extends _> DIV<T> div_(T e, boolean inline) {
-    return new DIV<T>("div", e, opt(true, inline, false)); }
-
-  private <T extends _> BLOCKQUOTE<T> blockquote_(T e, boolean inline) {
-    return new BLOCKQUOTE<T>("blockquote", e, opt(true, inline, false)); }
-
-  private <T extends _> BLOCKQUOTE<T> bq_(T e, boolean inline) {
-    return new BLOCKQUOTE<T>("blockquote", e, opt(true, inline, false)); }
-
-  private <T extends _> FIELDSET<T> fieldset_(T e, boolean inline) {
-    return new FIELDSET<T>("fieldset", e, opt(true, inline, false)); }
-
-  private <T extends _> FORM<T> form_(T e, boolean inline) {
-    return new FORM<T>("form", e, opt(true, inline, false)); }
-
-  public class BR<T extends _> extends EImp<T> implements HamletSpec.BR {
-    public BR(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public BR<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public BR<T> $class(String value) {
-      addAttr("class", value);
-      return this;
-    }
-
-    @Override
-    public BR<T> $title(String value) {
-      addAttr("title", value);
-      return this;
-    }
-
-    @Override
-    public BR<T> $style(String value) {
-      addAttr("style", value);
-      return this;
-    }
-  }
-
-  public class BDO<T extends _> extends EImp<T> implements HamletSpec.BDO {
-    public BDO(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public BDO<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public BDO<T> $class(String value) {
-      addAttr("class", value);
-      return this;
-    }
-
-    @Override
-    public BDO<T> $title(String value) {
-      addAttr("title", value);
-      return this;
-    }
-
-    @Override
-    public BDO<T> $style(String value) {
-      addAttr("style", value);
-      return this;
-    }
-
-    @Override
-    public BDO<T> $lang(String value) {
-      addAttr("lang", value);
-      return this;
-    }
-
-    @Override
-    public BDO<T> $dir(Dir value) {
-      addAttr("dir", value);
-      return this;
-    }
-
-    @Override
-    public BDO<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public BDO<T> _r(Object... lines) {
-      _p(false, lines);
-      return this;
-    }
-
-    @Override
-    public B<BDO<T>> b() {
-      closeAttrs();
-      return b_(this, true);
-    }
-
-    @Override
-    public BDO<T> b(String cdata) {
-      return b()._(cdata)._();
-    }
-
-    @Override
-    public BDO<T> b(String selector, String cdata) {
-      return setSelector(b(), selector)._(cdata)._();
-    }
-
-    @Override
-    public A<BDO<T>> a() {
-      closeAttrs();
-      return a_(this, true);
-    }
-
-    @Override
-    public BDO<T> a(String href, String anchorText) {
-      return a().$href(href)._(anchorText)._();
-    }
-
-    @Override
-    public BDO<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class SPAN<T extends _> extends EImp<T> implements HamletSpec.SPAN {
-    public SPAN(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public SPAN<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public SPAN<T> $class(String value) {
-      addAttr("class", value);
-      return this;
-    }
-
-    @Override
-    public SPAN<T> $onclick(String value) {
-      addAttr("onclick", value);
-      return this;
-    }
-
-    @Override
-    public SPAN<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public SPAN<T> span(String cdata) {
-      return span()._(cdata)._();
-    }
-
-    @Override
-    public SPAN<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class SUP<T extends _> extends EImp<T> implements HamletSpec.SUP {
-    public SUP(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public SUP<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public SUP<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public SUP<T> _r(Object... lines) {
-      _p(false, lines);
-      return this;
-    }
-
-    @Override
-    public B<SUP<T>> b() {
-      closeAttrs();
-      return b_(this, true);
-    }
-
-    @Override
-    public SUP<T> b(String cdata) {
-      return b()._(cdata)._();
-    }
-
-    @Override
-    public SUP<T> a(String href, String anchorText) {
-      return a().$href(href)._(anchorText)._();
-    }
-
-    @Override
-    public SUP<T> span(String cdata) {
-      return span()._(cdata)._();
-    }
-
-    @Override
-    public SUP<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class SUB<T extends _> extends EImp<T> implements HamletSpec.SUB {
-    public SUB(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public SUB<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public SUB<T> $class(String value) {
-      addAttr("class", value);
-      return this;
-    }
-
-    @Override
-    public SUB<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public SUB<T> sub(String cdata) {
-      return sub()._(cdata)._();
-    }
-
-    @Override
-    public SUB<T> sup(String cdata) {
-      return sup()._(cdata)._();
-    }
-
-    @Override
-    public SUB<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class ACRONYM<T extends _> extends EImp<T> implements HamletSpec.ACRONYM {
-    public ACRONYM(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public ACRONYM<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public ACRONYM<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public ACRONYM<T> _r(Object... lines) {
-      _p(false, lines);
-      return this;
-    }
-
-    @Override
-    public B<ACRONYM<T>> b() {
-      closeAttrs();
-      return b_(this, true);
-    }
-
-    @Override
-    public ACRONYM<T> b(String cdata) {
-      return b()._(cdata)._();
-    }
-
-    @Override
-    public ACRONYM<T> a(String href, String anchorText) {
-      return a().$href(href)._(anchorText)._();
-    }
-
-    @Override
-    public ACRONYM<T> abbr(String selector, String cdata) {
-      return setSelector(abbr(), selector)._(cdata)._();
-    }
-
-    @Override
-    public ACRONYM<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class ABBR<T extends _> extends EImp<T> implements HamletSpec.ABBR {
-    public ABBR(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public ABBR<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public ABBR<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public ABBR<T> _r(Object... lines) {
-      _p(false, lines);
-      return this;
-    }
-
-    @Override
-    public ABBR<T> abbr(String cdata) {
-      return abbr()._(cdata)._();
-    }
-
-    @Override
-    public ABBR<T> a(String href, String anchorText) {
-      return a().$href(href)._(anchorText)._();
-    }
-
-    @Override
-    public ABBR<T> button(String selector, String cdata) {
-      return setSelector(button(), selector)._(cdata)._();
-    }
-  }
-
-  public class CITE<T extends _> extends EImp<T> implements HamletSpec.CITE {
-    public CITE(String name, T parent, EnumSet<EOpt> opts) {
-      super(name, parent, opts);
-    }
-
-    @Override
-    public CITE<T> $id(String value) {
-      addAttr("id", value);
-      return this;
-    }
-
-    @Override
-    public CITE<T> _(Object... lines) {
-      _p(true, lines);
-      return this;
-    }
-
-    @Override
-    public CITE<T> cite(String cdata) {
-      return cite()._(cdata)._();
-    }
-
-    @Override
-    public A<CITE<T>> a() {
-      closeAttrs();
-      return
a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public CITE a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public CITE a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public CITE img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public CITE sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public CITE sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public CITE sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public CITE sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public CITE q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public CITE q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public CITE br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public CITE bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public CITE span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public CITE span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public CITE script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public CITE ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public CITE del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public CITE label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String 
selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public CITE textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public CITE button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class VAR extends EImp implements HamletSpec.VAR { - public VAR(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public VAR $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public VAR $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public VAR $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public VAR $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public VAR $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public VAR $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public VAR $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public VAR $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public VAR $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public VAR $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public VAR $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public VAR $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public VAR $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public VAR $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public VAR $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public VAR $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public VAR _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public VAR _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public VAR b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public VAR b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public VAR i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public VAR i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public VAR small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public VAR small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public VAR em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public VAR em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public VAR strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public VAR strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public VAR dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public VAR dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public VAR code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public VAR code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public VAR samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public VAR samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public VAR kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public VAR kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public VAR var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public VAR var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public VAR cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public VAR cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public VAR abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public VAR abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - 
@Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public VAR a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public VAR a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public VAR img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public VAR sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public VAR sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public VAR sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public VAR sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public VAR q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public VAR q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public VAR br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public VAR bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public VAR span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public VAR span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public VAR script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public VAR ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public VAR del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public VAR label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), 
selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public VAR textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public VAR button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class KBD extends EImp implements HamletSpec.KBD { - public KBD(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public KBD $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public KBD $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public KBD $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public KBD $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public KBD $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public KBD $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public KBD $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public KBD $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public KBD $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public KBD $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public KBD $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public KBD $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public KBD $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public KBD $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public KBD $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public KBD $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public KBD _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public KBD _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public KBD b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public KBD b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public KBD i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public KBD i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public KBD small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public KBD small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public KBD em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public KBD em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public KBD strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public KBD strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public KBD dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public KBD dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public KBD code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public KBD code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public KBD samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public KBD samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public KBD kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public KBD kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public KBD var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public KBD var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public KBD cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public KBD cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public KBD abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public KBD abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - 
@Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public KBD a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public KBD a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public KBD img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public KBD sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public KBD sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public KBD sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public KBD sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public KBD q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public KBD q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public KBD br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public KBD bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public KBD span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public KBD span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public KBD script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public KBD ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public KBD del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public KBD label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), 
selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public KBD textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public KBD button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class SAMP extends EImp implements HamletSpec.SAMP { - public SAMP(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public SAMP $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public SAMP $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public SAMP $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public SAMP $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public SAMP $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public SAMP $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public SAMP $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public SAMP $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public SAMP $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public SAMP $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public SAMP $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public SAMP $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public SAMP $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public SAMP $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public SAMP $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public SAMP $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public SAMP _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public SAMP _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public SAMP b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public SAMP b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public SAMP i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public SAMP i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public SAMP small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public SAMP small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public SAMP em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public SAMP em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public SAMP strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public SAMP strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public SAMP dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public SAMP dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public SAMP code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public SAMP code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public SAMP samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public SAMP samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public SAMP kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public SAMP kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public SAMP var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public SAMP var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public SAMP cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public SAMP cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public SAMP abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public SAMP abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return 
a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public SAMP a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public SAMP a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public SAMP img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public SAMP sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public SAMP sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public SAMP sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public SAMP sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public SAMP q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public SAMP q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public SAMP br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public SAMP bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public SAMP span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public SAMP span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public SAMP script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public SAMP ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public SAMP del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public SAMP label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String 
selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public SAMP textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public SAMP button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class CODE extends EImp implements HamletSpec.CODE { - public CODE(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public CODE $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public CODE $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public CODE $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public CODE $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public CODE $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public CODE $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public CODE $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public CODE $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public CODE $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public CODE $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public CODE $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public CODE $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public CODE $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public CODE $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public CODE $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public CODE $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public CODE _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public CODE _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public CODE b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public CODE b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public CODE i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public CODE i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public CODE small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public CODE small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public CODE em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public CODE em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public CODE strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public CODE strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public CODE dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public CODE dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public CODE code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public CODE code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public CODE samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public CODE samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public CODE kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public CODE kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public CODE var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public CODE var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public CODE cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public CODE cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public CODE abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public CODE abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return 
a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public CODE a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public CODE a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public CODE img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public CODE sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public CODE sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public CODE sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public CODE sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public CODE q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public CODE q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public CODE br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public CODE bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public CODE span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public CODE span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public CODE script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public CODE ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public CODE del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public CODE label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String 
selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public CODE textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public CODE button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class DFN extends EImp implements HamletSpec.DFN { - public DFN(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public DFN $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public DFN $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public DFN $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public DFN $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public DFN $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public DFN $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public DFN $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public DFN $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public DFN $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public DFN $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public DFN $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public DFN $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public DFN $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public DFN $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public DFN $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public DFN $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public DFN _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public DFN _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public DFN b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public DFN b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public DFN i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public DFN i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public DFN small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public DFN small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public DFN em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public DFN em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public DFN strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public DFN strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public DFN dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public DFN dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public DFN code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public DFN code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public DFN samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public DFN samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public DFN kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public DFN kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public DFN var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public DFN var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public DFN cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public DFN cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public DFN abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public DFN abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - 
@Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public DFN a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public DFN a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public DFN img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public DFN sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public DFN sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public DFN sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public DFN sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public DFN q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public DFN q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public DFN br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public DFN bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public DFN span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public DFN span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public DFN script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public DFN ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public DFN del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public DFN label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), 
selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public DFN textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public DFN button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class STRONG extends EImp implements HamletSpec.STRONG { - public STRONG(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public STRONG $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public STRONG $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public STRONG $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public STRONG $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public STRONG $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public STRONG $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public STRONG $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public STRONG $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public STRONG $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public STRONG $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public STRONG $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public STRONG $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public STRONG $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public STRONG $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public STRONG $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public STRONG $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public STRONG _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public STRONG _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public STRONG b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public STRONG b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public STRONG i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public STRONG i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public STRONG small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public STRONG small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public STRONG em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public STRONG em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public STRONG strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public STRONG strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public STRONG dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public STRONG dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public STRONG code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public STRONG code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public STRONG samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public STRONG samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public STRONG kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public STRONG kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public STRONG var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public STRONG var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public STRONG cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public STRONG cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public STRONG abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public STRONG abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - 
@Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public STRONG a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public STRONG a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public STRONG img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public STRONG sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public STRONG sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public STRONG sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public STRONG sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public STRONG q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public STRONG q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public STRONG br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public STRONG bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public STRONG span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public STRONG span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public STRONG script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public STRONG ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public STRONG del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public STRONG label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return 
setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public STRONG textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public STRONG button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class EM extends EImp implements HamletSpec.EM { - public EM(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public EM $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public EM $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public EM $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public EM $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public EM $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public EM $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public EM $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public EM $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public EM $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public EM $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public EM $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public EM $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public EM $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public EM $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public EM $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public EM $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public EM _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public EM _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public EM b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public EM b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public EM i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public EM i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public EM small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public EM small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public EM em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public EM em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public EM strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public EM strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public EM dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public EM dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public EM code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public EM code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public EM samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public EM samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public EM kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public EM kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public EM var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public EM var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public EM cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public EM cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public EM abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public EM abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> 
a(String selector) { - return setSelector(a(), selector); - } - - @Override - public EM a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public EM a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public EM img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public EM sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public EM sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public EM sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public EM sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public EM q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public EM q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public EM br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public EM bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public EM span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public EM span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public EM script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public EM ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public EM del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public EM label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public 
TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public EM textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public EM button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class SMALL extends EImp implements HamletSpec.SMALL { - public SMALL(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public SMALL $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public SMALL $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public SMALL $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public SMALL $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public SMALL $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public SMALL $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public SMALL $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public SMALL $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public SMALL $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public SMALL $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public SMALL $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public SMALL $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public SMALL $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public SMALL $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public SMALL $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public SMALL $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public SMALL _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public SMALL _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public SMALL b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public SMALL b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public SMALL i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public SMALL i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public SMALL small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public SMALL small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public SMALL em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public SMALL em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public SMALL strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public SMALL strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public SMALL dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public SMALL dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public SMALL code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public SMALL code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public SMALL samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public SMALL samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public SMALL kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public SMALL kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public SMALL var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public SMALL var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public SMALL cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public SMALL cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public SMALL abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public SMALL abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - 
closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - return setSelector(a(), selector); - } - - @Override - public SMALL a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public SMALL a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public SMALL img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public SMALL sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public SMALL sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public SMALL sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public SMALL sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public SMALL q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public SMALL q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public SMALL br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public SMALL bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public SMALL span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public SMALL span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public SMALL script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public SMALL ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public SMALL del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public SMALL label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - 
public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - return textarea_(this, true); - } - - @Override - public SMALL textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public SMALL button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class B extends EImp implements HamletSpec.B { - public B(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public B $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public B $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public B $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public B $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public B $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public B $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public B $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public B $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public B $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public B $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public B $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public B $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public B $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public B $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public B $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public B $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public B _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public B _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public B b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public B b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public B i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public B i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public B small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public B small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public B em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public B em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public B strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public B strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public B dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public B dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public B code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public B code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public B samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public B samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public B kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public B kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public B var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public B var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public B cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public B cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public B abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public B abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - 
return setSelector(a(), selector); - } - - @Override - public B a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public B a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public B img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public B sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public B sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public B sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public B sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public B q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public B q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public B br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public B bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public B span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public B span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public B script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public B ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public B del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public B label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - 
return textarea_(this, true); - } - - @Override - public B textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public B button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - public class I extends EImp implements HamletSpec.I { - public I(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - @Override - public I $id(String value) { - addAttr("id", value); - return this; - } - - @Override - public I $class(String value) { - addAttr("class", value); - return this; - } - - @Override - public I $title(String value) { - addAttr("title", value); - return this; - } - - @Override - public I $style(String value) { - addAttr("style", value); - return this; - } - - @Override - public I $lang(String value) { - addAttr("lang", value); - return this; - } - - @Override - public I $dir(Dir value) { - addAttr("dir", value); - return this; - } - - @Override - public I $onclick(String value) { - addAttr("onclick", value); - return this; - } - - @Override - public I $ondblclick(String value) { - addAttr("ondblclick", value); - return this; - } - - @Override - public I $onmousedown(String value) { - addAttr("onmousedown", value); - return this; - } - - @Override - public I $onmouseup(String value) { - addAttr("onmouseup", value); - return this; - } - - @Override - public I $onmouseover(String value) { - addAttr("onmouseover", value); - return this; - } - - @Override - public I $onmousemove(String value) { - addAttr("onmousemove", value); - return this; - } - - @Override - public I $onmouseout(String value) { - addAttr("onmouseout", value); - return this; - } - - @Override - public I $onkeypress(String value) { - addAttr("onkeypress", value); - return this; - } - - @Override - public I $onkeydown(String value) { - addAttr("onkeydown", value); - return this; - } - - @Override - public I $onkeyup(String value) { - addAttr("onkeyup", value); - return this; - } - - @Override - public I _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public I _r(Object... 
lines) { - _p(false, lines); - return this; - } - - @Override - public B> b() { - closeAttrs(); - return b_(this, true); - } - - @Override - public I b(String cdata) { - return b()._(cdata)._(); - } - - @Override - public I b(String selector, String cdata) { - return setSelector(b(), selector)._(cdata)._(); - } - - @Override - public I> i() { - closeAttrs(); - return i_(this, true); - } - - @Override - public I i(String cdata) { - return i()._(cdata)._(); - } - - @Override - public I i(String selector, String cdata) { - return setSelector(i(), selector)._(cdata)._(); - } - - @Override - public SMALL> small() { - closeAttrs(); - return small_(this, true); - } - - @Override - public I small(String cdata) { - return small()._(cdata)._(); - } - - @Override - public I small(String selector, String cdata) { - return setSelector(small(), selector)._(cdata)._(); - } - - @Override - public I em(String cdata) { - return em()._(cdata)._(); - } - - @Override - public EM> em() { - closeAttrs(); - return em_(this, true); - } - - @Override - public I em(String selector, String cdata) { - return setSelector(em(), selector)._(cdata)._(); - } - - @Override - public STRONG> strong() { - closeAttrs(); - return strong_(this, true); - } - - @Override - public I strong(String cdata) { - return strong()._(cdata)._(); - } - - @Override - public I strong(String selector, String cdata) { - return setSelector(strong(), selector)._(cdata)._(); - } - - @Override - public DFN> dfn() { - closeAttrs(); - return dfn_(this, true); - } - - @Override - public I dfn(String cdata) { - return dfn()._(cdata)._(); - } - - @Override - public I dfn(String selector, String cdata) { - return setSelector(dfn(), selector)._(cdata)._(); - } - - @Override - public CODE> code() { - closeAttrs(); - return code_(this, true); - } - - @Override - public I code(String cdata) { - return code()._(cdata)._(); - } - - @Override - public I code(String selector, String cdata) { - return setSelector(code(), selector)._(cdata)._(); - } - - @Override - public I samp(String cdata) { - return samp()._(cdata)._(); - } - - @Override - public SAMP> samp() { - closeAttrs(); - return samp_(this, true); - } - - @Override - public I samp(String selector, String cdata) { - return setSelector(samp(), selector)._(cdata)._(); - } - - @Override - public KBD> kbd() { - closeAttrs(); - return kbd_(this, true); - } - - @Override - public I kbd(String cdata) { - return kbd()._(cdata)._(); - } - - @Override - public I kbd(String selector, String cdata) { - return setSelector(kbd(), selector)._(cdata)._(); - } - - @Override - public VAR> var() { - closeAttrs(); - return var_(this, true); - } - - @Override - public I var(String cdata) { - return var()._(cdata)._(); - } - - @Override - public I var(String selector, String cdata) { - return setSelector(var(), selector)._(cdata)._(); - } - - @Override - public CITE> cite() { - closeAttrs(); - return cite_(this, true); - } - - @Override - public I cite(String cdata) { - return cite()._(cdata)._(); - } - - @Override - public I cite(String selector, String cdata) { - return setSelector(cite(), selector)._(cdata)._(); - } - - @Override - public ABBR> abbr() { - closeAttrs(); - return abbr_(this, true); - } - - @Override - public I abbr(String cdata) { - return abbr()._(cdata)._(); - } - - @Override - public I abbr(String selector, String cdata) { - return setSelector(abbr(), selector)._(cdata)._(); - } - - @Override - public A> a() { - closeAttrs(); - return a_(this, true); - } - - @Override - public A> a(String selector) { - 
return setSelector(a(), selector); - } - - @Override - public I a(String href, String anchorText) { - return a().$href(href)._(anchorText)._(); - } - - @Override - public I a(String selector, String href, String anchorText) { - return setSelector(a(), selector).$href(href)._(anchorText)._(); - } - - @Override - public IMG> img() { - closeAttrs(); - return img_(this, true); - } - - @Override - public I img(String src) { - return img().$src(src)._(); - } - - @Override - public OBJECT> object() { - closeAttrs(); - return object_(this, true); - } - - @Override - public OBJECT> object(String selector) { - return setSelector(object(), selector); - } - - @Override - public SUB> sub() { - closeAttrs(); - return sub_(this, true); - } - - @Override - public I sub(String cdata) { - return sub()._(cdata)._(); - } - - @Override - public I sub(String selector, String cdata) { - return setSelector(sub(), selector)._(cdata)._(); - } - - @Override - public SUP> sup() { - closeAttrs(); - return sup_(this, true); - } - - @Override - public I sup(String cdata) { - return sup()._(cdata)._(); - } - - @Override - public I sup(String selector, String cdata) { - return setSelector(sup(), selector)._(cdata)._(); - } - - @Override - public MAP> map() { - closeAttrs(); - return map_(this, true); - } - - @Override - public MAP> map(String selector) { - return setSelector(map(), selector); - } - - @Override - public I q(String cdata) { - return q()._(cdata)._(); - } - - @Override - public I q(String selector, String cdata) { - return setSelector(q(), selector)._(cdata)._(); - } - - @Override - public Q> q() { - closeAttrs(); - return q_(this, true); - } - - @Override - public BR> br() { - closeAttrs(); - return br_(this, true); - } - - @Override - public I br(String selector) { - return setSelector(br(), selector)._(); - } - - @Override - public BDO> bdo() { - closeAttrs(); - return bdo_(this, true); - } - - @Override - public I bdo(Dir dir, String cdata) { - return bdo().$dir(dir)._(cdata)._(); - } - - @Override - public SPAN> span() { - closeAttrs(); - return span_(this, true); - } - - @Override - public I span(String cdata) { - return span()._(cdata)._(); - } - - @Override - public I span(String selector, String cdata) { - return setSelector(span(), selector)._(cdata)._(); - } - - @Override - public SCRIPT> script() { - closeAttrs(); - return script_(this, true); - } - - @Override - public I script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public INS> ins() { - closeAttrs(); - return ins_(this, true); - } - - @Override - public I ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL> del() { - closeAttrs(); - return del_(this, true); - } - - @Override - public I del(String cdata) { - return del()._(cdata)._(); - } - - @Override - public LABEL> label() { - closeAttrs(); - return label_(this, true); - } - - @Override - public I label(String forId, String cdata) { - return label().$for(forId)._(cdata)._(); - } - - @Override - public INPUT> input(String selector) { - return setSelector(input(), selector); - } - - @Override - public INPUT> input() { - closeAttrs(); - return input_(this, true); - } - - @Override - public SELECT> select() { - closeAttrs(); - return select_(this, true); - } - - @Override - public SELECT> select(String selector) { - return setSelector(select(), selector); - } - - @Override - public TEXTAREA> textarea(String selector) { - return setSelector(textarea(), selector); - } - - @Override - public TEXTAREA> textarea() { - closeAttrs(); - 
return textarea_(this, true); - } - - @Override - public I textarea(String selector, String cdata) { - return setSelector(textarea(), selector)._(cdata)._(); - } - - @Override - public BUTTON> button() { - closeAttrs(); - return button_(this, true); - } - - @Override - public BUTTON> button(String selector) { - return setSelector(button(), selector); - } - - @Override - public I button(String selector, String cdata) { - return setSelector(button(), selector)._(cdata)._(); - } - } - - private INPUT input_(T e, boolean inline) { - return new INPUT("input", e, opt(false, inline, false)); } - - private SELECT select_(T e, boolean inline) { - return new SELECT("select", e, opt(true, inline, false)); } - - private TEXTAREA textarea_(T e, boolean inline) { - return new TEXTAREA("textarea", e, opt(true, inline, false)); } - - private BUTTON button_(T e, boolean inline) { - return new BUTTON("button", e, opt(true, inline, false)); } - - private LABEL label_(T e, boolean inline) { - return new LABEL("label", e, opt(true, inline, false)); } - - private MAP map_(T e, boolean inline) { - return new MAP("map", e, opt(true, inline, false)); } - - private Q q_(T e, boolean inline) { - return new Q("q", e, opt(true, inline, false)); } - - private BR br_(T e, boolean inline) { - return new BR("br", e, opt(false, inline, false)); } - - private BDO bdo_(T e, boolean inline) { - return new BDO("bdo", e, opt(true, inline, false)); } - - private SPAN span_(T e, boolean inline) { - return new SPAN("span", e, opt(true, inline, false)); } - - private INS ins_(T e, boolean inline) { - return new INS("ins", e, opt(true, inline, false)); } - - private DEL del_(T e, boolean inline) { - return new DEL("del", e, opt(true, inline, false)); } - - private A a_(T e, boolean inline) { - return new A("a", e, opt(true, inline, false)); } - - private SUB sub_(T e, boolean inline) { - return new SUB("sub", e, opt(true, inline, false)); } - - private SUP sup_(T e, boolean inline) { - return new SUP("sup", e, opt(true, inline, false)); } - - private IMG img_(T e, boolean inline) { - return new IMG("img", e, opt(false, inline, false)); } - - private EM em_(T e, boolean inline) { - return new EM("em", e, opt(true, inline, false)); } - - private STRONG strong_(T e, boolean inline) { - return new STRONG("strong", e, opt(true, inline, false)); } - - private DFN dfn_(T e, boolean inline) { - return new DFN("dfn", e, opt(true, inline, false)); } - - private CODE code_(T e, boolean inline) { - return new CODE("code", e, opt(true, inline, false)); } - - private SAMP samp_(T e, boolean inline) { - return new SAMP("samp", e, opt(true, inline, false)); } - - private KBD kbd_(T e, boolean inline) { - return new KBD("kbd", e, opt(true, inline, false)); } - - private VAR var_(T e, boolean inline) { - return new VAR("var", e, opt(true, inline, false)); } - - private CITE cite_(T e, boolean inline) { - return new CITE("cite", e, opt(true, inline, false)); } - - private ABBR abbr_(T e, boolean inline) { - return new ABBR("abbr", e, opt(true, inline, false)); } - - private B b_(T e, boolean inline) { - return new B("b", e, opt(true, inline, false)); } - - private I i_(T e, boolean inline) { - return new I("i", e, opt(true, inline, false)); } - - private SMALL small_(T e, boolean inline) { - return new SMALL("small", e, opt(true, inline, false)); } - - private PRE pre_(T e, boolean inline) { - return new PRE("pre", e, opt(true, inline, true)); } - - private UL ul_(T e, boolean inline) { - return new UL("ul", e, opt(true, inline, false)); } - - private 
OL ol_(T e, boolean inline) { - return new OL("ol", e, opt(true, inline, false)); } - - private H1 h1_(T e, boolean inline) { - return new H1("h1", e, opt(true, inline, false)); } - - private H2 h2_(T e, boolean inline) { - return new H2("h2", e, opt(true, inline, false)); } - - private H3 h3_(T e, boolean inline) { - return new H3("h3", e, opt(true, inline, false)); } - - private H4 h4_(T e, boolean inline) { - return new H4("h4", e, opt(true, inline, false)); } - - private H5 h5_(T e, boolean inline) { - return new H5("h5", e, opt(true, inline, false)); } - - private H6 h6_(T e, boolean inline) { - return new H6("h6", e, opt(true, inline, false)); } - - private STYLE style_(T e, boolean inline) { - return new STYLE("style", e, opt(true, inline, false)); } - - private LINK link_(T e, boolean inline) { - return new LINK("link", e, opt(false, inline, false)); } - - private META meta_(T e, boolean inline) { - return new META("meta", e, opt(false, inline, false)); } - - private OBJECT object_(T e, boolean inline) { - return new OBJECT("object", e, opt(true, inline, false)); } - - private SCRIPT script_(T e, boolean inline) { - return new SCRIPT("script", e, opt(true, inline, false)); } - - @Override - public HEAD head() { - return head_(this, false); - } - - @Override - public BODY body() { - return body_(this, false); - } - - @Override - public BODY body(String selector) { - return setSelector(body(), selector); - } - - @Override - public BASE base() { - return base_(this, false); - } - - @Override - public Hamlet base(String href) { - return base().$href(href)._(); - } - - @Override - public TITLE title() { - return title_(this, false); - } - - @Override - public Hamlet title(String cdata) { - return title()._(cdata)._(); - } - - @Override - public STYLE style() { - return style_(this, false); - } - - @Override - public Hamlet style(Object... 
lines) { - return style().$type("text/css")._(lines)._(); - } - - @Override - public LINK link() { - return link_(this, false); - } - - @Override - public Hamlet link(String href) { - return setLinkHref(link(), href)._(); - } - - @Override - public META meta() { - return meta_(this, false); - } - - @Override - public Hamlet meta(String name, String content) { - return meta().$name(name).$content(content)._(); - } - - @Override - public Hamlet meta_http(String header, String content) { - return meta().$http_equiv(header).$content(content)._(); - } - - @Override - public SCRIPT script() { - return script_(this, false); - } - - @Override - public Hamlet script(String src) { - return setScriptSrc(script(), src)._(); - } - - @Override - public OBJECT object() { - return object_(this, true); - } - - @Override - public OBJECT object(String selector) { - return setSelector(object(), selector); - } - - @Override - public TABLE table() { - return table_(this, false); - } - - @Override - public TABLE table(String selector) { - return setSelector(table(), selector); - } - - @Override - public Hamlet address(String cdata) { - return address()._(cdata)._(); - } - - @Override - public ADDRESS address() { - return address_(this, false); - } - - @Override - public P p(String selector) { - return setSelector(p(), selector); - } - - @Override - public P p() { - return p_(this, false); - } - - @Override - public Hamlet _(Class cls) { - subView(cls); - return this; - } - - @Override - public HR hr() { - return hr_(this, false); - } - - @Override - public Hamlet hr(String selector) { - return setSelector(hr(), selector)._(); - } - - @Override - public DL dl(String selector) { - return setSelector(dl(), selector); - } - - @Override - public DL dl() { - return dl_(this, false); - } - - @Override - public DIV div(String selector) { - return setSelector(div(), selector); - } - - @Override - public DIV div() { - return div_(this, false); - } - - @Override - public BLOCKQUOTE blockquote() { - return blockquote_(this, false); - } - - @Override - public BLOCKQUOTE bq() { - return blockquote_(this, false); - } - - @Override - public Hamlet h1(String cdata) { - return h1()._(cdata)._(); - } - - @Override - public H1 h1() { - return h1_(this, false); - } - - @Override - public Hamlet h1(String selector, String cdata) { - return setSelector(h1(), selector)._(cdata)._(); - } - - @Override - public Hamlet h2(String cdata) { - return h2()._(cdata)._(); - } - - @Override - public H2 h2() { - return h2_(this, false); - } - - @Override - public Hamlet h2(String selector, String cdata) { - return setSelector(h2(), selector)._(cdata)._(); - } - - @Override - public H3 h3() { - return h3_(this, false); - } - - @Override - public Hamlet h3(String cdata) { - return h3()._(cdata)._(); - } - - @Override - public Hamlet h3(String selector, String cdata) { - return setSelector(h3(), selector)._(cdata)._(); - } - - @Override - public H4 h4() { - return h4_(this, false); - } - - @Override - public Hamlet h4(String cdata) { - return h4()._(cdata)._(); - } - - @Override - public Hamlet h4(String selector, String cdata) { - return setSelector(h4(), selector)._(cdata)._(); - } - - @Override - public H5 h5() { - return h5_(this, false); - } - - @Override - public Hamlet h5(String cdata) { - return h5()._(cdata)._(); - } - - @Override - public Hamlet h5(String selector, String cdata) { - return setSelector(h5(), selector)._(cdata)._(); - } - - @Override - public H6 h6() { - return h6_(this, false); - } - - @Override - public Hamlet h6(String 
cdata) { - return h6()._(cdata)._(); - } - - @Override - public Hamlet h6(String selector, String cdata) { - return setSelector(h6(), selector)._(cdata)._(); - } - - @Override - public UL ul() { - return ul_(this, false); - } - - @Override - public UL ul(String selector) { - return setSelector(ul(), selector); - } - - @Override - public OL ol() { - return ol_(this, false); - } - - @Override - public OL ol(String selector) { - return setSelector(ol(), selector); - } - - @Override - public PRE pre() { - return pre_(this, false); - } - - @Override - public PRE pre(String selector) { - return setSelector(pre(), selector); - } - - @Override - public FORM form() { - return form_(this, false); - } - - @Override - public FORM form(String selector) { - return setSelector(form(), selector); - } - - @Override - public FIELDSET fieldset() { - return fieldset_(this, false); - } - - @Override - public FIELDSET fieldset(String selector) { - return setSelector(fieldset(), selector); - } - - @Override - public INS ins() { - return ins_(this, false); - } - - @Override - public Hamlet ins(String cdata) { - return ins()._(cdata)._(); - } - - @Override - public DEL del() { - return del_(this, false); - } - - @Override - public Hamlet del(String cdata) { - return del()._(cdata)._(); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java deleted file mode 100644 index 83373ef8ca215..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java +++ /dev/null @@ -1,451 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.webapp.hamlet; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - -import java.io.IOException; -import java.io.PrintWriter; -import java.lang.annotation.Annotation; -import java.lang.reflect.Method; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.util.Set; -import java.util.regex.Pattern; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.Options; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.webapp.WebAppException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Generates a specific hamlet implementation class from a spec class - * using a generic hamlet implementation class. - * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. 
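(Editorial aside, not part of the patch hunks.) For readers skimming this deletion: the class removed above is a fluent HTML builder, and the hamlet2 package named in the deprecation note is its replacement, following the same shape with __() in place of the underscore methods (a lone _ is no longer a legal identifier on newer Java releases). Below is a minimal, hedged usage sketch against the deprecated API exactly as it appears in this file; the class name HamletUsageSketch and the "#greeting" selector are illustrative, and the snippet only compiles at a pre-Java-9 source level.

import java.io.PrintWriter;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; // the generated builder removed by this patch

public class HamletUsageSketch {
  public static void main(String[] args) {
    PrintWriter out = new PrintWriter(System.out);
    // Constructor shape as emitted by HamletGen: (PrintWriter out, int nestLevel, boolean wasInline).
    Hamlet html = new Hamlet(out, 0, false);
    html.
      p("#greeting").        // "#id.class" selectors set the id/class attributes
        _("Hello, world").   // _(Object...) appends text content
      _();                   // no-arg _() closes the current element
    out.flush();             // prints roughly: <p id="greeting">Hello, world</p>
  }
}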
- */ -@Deprecated -@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) -public class HamletGen { - static final Logger LOG = LoggerFactory.getLogger(HamletGen.class); - static final Options opts = new Options(); - static { - opts.addOption("h", "help", false, "Print this help message"). - addOption("s", "spec-class", true, - "The class that holds the spec interfaces. e.g. HamletSpec"). - addOption("i", "impl-class", true, - "An implementation class. e.g. HamletImpl"). - addOption("o", "output-class", true, "Output class name"). - addOption("p", "output-package", true, "Output package name"); - }; - - static final Pattern elementRegex = Pattern.compile("^[A-Z][A-Z0-9]*$"); - - int bytes = 0; - PrintWriter out; - final Set endTagOptional = Sets.newHashSet(); - final Set inlineElements = Sets.newHashSet(); - Class top; // html top-level interface - String hamlet; // output class simple name; - boolean topMode; - - /** - * Generate a specific Hamlet implementation from a spec. - * @param specClass holds hamlet interfaces. e.g. {@link HamletSpec} - * @param implClass a generic hamlet implementation. e.g. {@link HamletImpl} - * @param outputName name of the output class. e.g. {@link Hamlet} - * @param outputPkg package name of the output class. - * @throws IOException - */ - public void generate(Class specClass, Class implClass, - String outputName, String outputPkg) throws IOException { - LOG.info("Generating {} using {} and {}", new Object[]{outputName, - specClass, implClass}); - out = new PrintWriter(outputName +".java", "UTF-8"); - hamlet = basename(outputName); - String pkg = pkgName(outputPkg, implClass.getPackage().getName()); - puts(0, "// Generated by HamletGen. Do NOT edit!\n", - "package ", pkg, ";\n", - "import java.io.PrintWriter;\n", - "import java.util.EnumSet;\n", - "import static java.util.EnumSet.*;\n", - "import static ", implClass.getName(), ".EOpt.*;\n", - "import org.apache.hadoop.yarn.webapp.SubView;"); - String implClassName = implClass.getSimpleName(); - if (!implClass.getPackage().getName().equals(pkg)) { - puts(0, "import ", implClass.getName(), ';'); - } - puts(0, "\n", - "public class ", hamlet, " extends ", implClassName, - " implements ", specClass.getSimpleName(), "._Html {\n", - " public ", hamlet, "(PrintWriter out, int nestLevel,", - " boolean wasInline) {\n", - " super(out, nestLevel, wasInline);\n", - " }\n\n", // inline is context sensitive - " static EnumSet opt(boolean endTag, boolean inline, ", - "boolean pre) {\n", - " EnumSet opts = of(ENDTAG);\n", - " if (!endTag) opts.remove(ENDTAG);\n", - " if (inline) opts.add(INLINE);\n", - " if (pre) opts.add(PRE);\n", - " return opts;\n", - " }"); - initLut(specClass); - genImpl(specClass, implClassName, 1); - LOG.info("Generating {} methods", hamlet); - genMethods(hamlet, top, 1); - puts(0, "}"); - out.close(); - LOG.info("Wrote {} bytes to {}.java", bytes, outputName); - } - - String basename(String path) { - return path.substring(path.lastIndexOf('/') + 1); - } - - String pkgName(String pkg, String defaultPkg) { - if (pkg == null || pkg.isEmpty()) return defaultPkg; - return pkg; - } - - void initLut(Class spec) { - endTagOptional.clear(); - inlineElements.clear(); - for (Class cls : spec.getClasses()) { - Annotation a = cls.getAnnotation(HamletSpec.Element.class); - if (a != null && !((HamletSpec.Element) a).endTag()) { - endTagOptional.add(cls.getSimpleName()); - } - if (cls.getSimpleName().equals("Inline")) { - for (Method method : cls.getMethods()) { - String retName = 
method.getReturnType().getSimpleName(); - if (isElement(retName)) { - inlineElements.add(retName); - } - } - } - } - } - - void genImpl(Class spec, String implClassName, int indent) { - String specName = spec.getSimpleName(); - for (Class cls : spec.getClasses()) { - String className = cls.getSimpleName(); - if (cls.isInterface()) { - genFactoryMethods(cls, indent); - } - if (isElement(className)) { - LOG.info("Generating class {}", className); - puts(indent, "\n", - "public class ", className, "", - " extends EImp implements ", specName, ".", className, " {\n", - " public ", className, "(String name, T parent,", - " EnumSet opts) {\n", - " super(name, parent, opts);\n", - " }"); - genMethods(className, cls, indent + 1); - puts(indent, "}"); - } else if (className.equals("_Html")) { - top = cls; - } - } - } - - void genFactoryMethods(Class cls, int indent) { - for (Method method : cls.getDeclaredMethods()) { - String retName = method.getReturnType().getSimpleName(); - String methodName = method.getName(); - if (methodName.charAt(0) == '$') continue; - if (isElement(retName) && method.getParameterTypes().length == 0) { - genFactoryMethod(retName, methodName, indent); - } - } - } - - void genMethods(String className, Class cls, int indent) { - topMode = (top != null && cls.equals(top)); - for (Method method : cls.getMethods()) { - String retName = method.getReturnType().getSimpleName(); - if (method.getName().charAt(0) == '$') { - genAttributeMethod(className, method, indent); - } else if (isElement(retName)) { - genNewElementMethod(className, method, indent); - } else { - genCurElementMethod(className, method, indent); - } - } - } - - void genAttributeMethod(String className, Method method, int indent) { - String methodName = method.getName(); - String attrName = methodName.substring(1).replace('_', '-'); - Type[] params = method.getGenericParameterTypes(); - echo(indent, "\n", - "@Override\n", - "public ", className, topMode ? 
" " : " ", methodName, "("); - if (params.length == 0) { - puts(0, ") {"); - puts(indent, - " addAttr(\"", attrName, "\", null);\n", - " return this;\n", "}"); - } else if (params.length == 1) { - String typeName = getTypeName(params[0]); - puts(0, typeName, " value) {"); - if (typeName.equals("EnumSet")) { - puts(indent, - " addRelAttr(\"", attrName, "\", value);\n", - " return this;\n", "}"); - } else if (typeName.equals("EnumSet")) { - puts(indent, - " addMediaAttr(\"", attrName, "\", value);\n", - " return this;\n", "}"); - } else { - puts(indent, - " addAttr(\"", attrName, "\", value);\n", - " return this;\n", "}"); - } - } else { - throwUnhandled(className, method); - } - } - - String getTypeName(Type type) { - if (type instanceof Class) { - return ((Class)type).getSimpleName(); - } - ParameterizedType pt = (ParameterizedType) type; - return ((Class)pt.getRawType()).getSimpleName() +"<"+ - ((Class)pt.getActualTypeArguments()[0]).getSimpleName() +">"; - } - - void genFactoryMethod(String retName, String methodName, int indent) { - puts(indent, "\n", - "private ", retName, " ", methodName, - "_(T e, boolean inline) {\n", - " return new ", retName, "(\"", StringUtils.toLowerCase(retName), - "\", e, opt(", !endTagOptional.contains(retName), ", inline, ", - retName.equals("PRE"), ")); }"); - } - - void genNewElementMethod(String className, Method method, int indent) { - String methodName = method.getName(); - String retName = method.getReturnType().getSimpleName(); - Class[] params = method.getParameterTypes(); - echo(indent, "\n", - "@Override\n", - "public ", retName, "<", className, topMode ? "> " : "> ", - methodName, "("); - if (params.length == 0) { - puts(0, ") {"); - puts(indent, - topMode ? "" : " closeAttrs();\n", - " return ", StringUtils.toLowerCase(retName), "_" + "(this, ", - isInline(className, retName), ");\n", "}"); - } else if (params.length == 1) { - puts(0, "String selector) {"); - puts(indent, - " return setSelector(", methodName, "(), selector);\n", "}"); - } else { - throwUnhandled(className, method); - } - } - - boolean isInline(String container, String className) { - if ((container.equals("BODY") || container.equals(hamlet) || - container.equals("HEAD") || container.equals("HTML")) && - (className.equals("INS") || className.equals("DEL") || - className.equals("SCRIPT"))) { - return false; - } - return inlineElements.contains(className); - } - - void genCurElementMethod(String className, Method method, int indent) { - String methodName = method.getName(); - Class[] params = method.getParameterTypes(); - if (topMode || params.length > 0) { - echo(indent, "\n", - "@Override\n", - "public ", className, topMode ? " " : " ", methodName, "("); - } - if (params.length == 0) { - if (topMode) { - puts(0, ") {"); - puts(indent, " return this;\n", "}"); - } - } else if (params.length == 1) { - if (methodName.equals("base")) { - puts(0, "String href) {"); - puts(indent, - " return base().$href(href)._();\n", "}"); - } else if (methodName.equals("script")) { - puts(0, "String src) {"); - puts(indent, - " return setScriptSrc(script(), src)._();\n", "}"); - } else if (methodName.equals("style")) { - puts(0, "Object... 
lines) {"); - puts(indent, - " return style().$type(\"text/css\")._(lines)._();\n", "}"); - } else if (methodName.equals("img")) { - puts(0, "String src) {"); - puts(indent, - " return ", methodName, "().$src(src)._();\n", "}"); - } else if (methodName.equals("br") || methodName.equals("hr") || - methodName.equals("col")) { - puts(0, "String selector) {"); - puts(indent, - " return setSelector(", methodName, "(), selector)._();\n", "}"); - } else if (methodName.equals("link")) { - puts(0, "String href) {"); - puts(indent, - " return setLinkHref(", methodName, "(), href)._();\n", "}"); - } else if (methodName.equals("_")) { - if (params[0].getSimpleName().equals("Class")) { - puts(0, "Class cls) {"); - puts(indent, - " ", topMode ? "subView" : "_v", "(cls);\n", - " return this;\n", "}"); - } else { - puts(0, "Object... lines) {"); - puts(indent, - " _p(", needsEscaping(className), ", lines);\n", - " return this;\n", "}"); - } - } else if (methodName.equals("_r")) { - puts(0, "Object... lines) {"); - puts(indent, - " _p(false, lines);\n", - " return this;\n", "}"); - } else { - puts(0, "String cdata) {"); - puts(indent, - " return ", methodName, "()._(cdata)._();\n", "}"); - } - } else if (params.length == 2) { - if (methodName.equals("meta")) { - puts(0, "String name, String content) {"); - puts(indent, - " return meta().$name(name).$content(content)._();\n", "}"); - } else if (methodName.equals("meta_http")) { - puts(0, "String header, String content) {"); - puts(indent, - " return meta().$http_equiv(header).$content(content)._();\n", - "}"); - } else if (methodName.equals("a")) { - puts(0, "String href, String anchorText) {"); - puts(indent, - " return a().$href(href)._(anchorText)._();\n", "}"); - } else if (methodName.equals("bdo")) { - puts(0, "Dir dir, String cdata) {"); - puts(indent, " return bdo().$dir(dir)._(cdata)._();\n", "}"); - } else if (methodName.equals("label")) { - puts(0, "String forId, String cdata) {"); - puts(indent, " return label().$for(forId)._(cdata)._();\n", "}"); - } else if (methodName.equals("param")) { - puts(0, "String name, String value) {"); - puts(indent, - " return param().$name(name).$value(value)._();\n", "}"); - } else { - puts(0, "String selector, String cdata) {"); - puts(indent, - " return setSelector(", methodName, - "(), selector)._(cdata)._();\n", "}"); - } - } else if (params.length == 3) { - if (methodName.equals("a")) { - puts(0, "String selector, String href, String anchorText) {"); - puts(indent, - " return setSelector(a(), selector)", - ".$href(href)._(anchorText)._();\n", "}"); - } - } else { - throwUnhandled(className, method); - } - } - - static boolean needsEscaping(String eleName) { - return !eleName.equals("SCRIPT") && !eleName.equals("STYLE"); - } - - static void throwUnhandled(String className, Method method) { - throw new WebAppException("Unhandled " + className + "#" + method); - } - - void echo(int indent, Object... args) { - String prev = null; - for (Object o : args) { - String s = String.valueOf(o); - if (!s.isEmpty() && !s.equals("\n") && - (prev == null || prev.endsWith("\n"))) { - indent(indent); - } - prev = s; - out.print(s); - bytes += s.length(); - } - } - - void indent(int indent) { - for (int i = 0; i < indent; ++i) { - out.print(" "); - bytes += 2; - } - } - - void puts(int indent, Object... 
args) { - echo(indent, args); - out.println(); - ++bytes; - } - - boolean isElement(String s) { - return elementRegex.matcher(s).matches(); - } - - public static void main(String[] args) throws Exception { - CommandLine cmd = new GnuParser().parse(opts, args); - if (cmd.hasOption("help")) { - new HelpFormatter().printHelp("Usage: hbgen [OPTIONS]", opts); - return; - } - // defaults - Class specClass = HamletSpec.class; - Class implClass = HamletImpl.class; - String outputClass = "HamletTmp"; - String outputPackage = implClass.getPackage().getName(); - if (cmd.hasOption("spec-class")) { - specClass = Class.forName(cmd.getOptionValue("spec-class")); - } - if (cmd.hasOption("impl-class")) { - implClass = Class.forName(cmd.getOptionValue("impl-class")); - } - if (cmd.hasOption("output-class")) { - outputClass = cmd.getOptionValue("output-class"); - } - if (cmd.hasOption("output-package")) { - outputPackage = cmd.getOptionValue("output-package"); - } - new HamletGen().generate(specClass, implClass, outputClass, outputPackage); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java deleted file mode 100644 index 375fb6045d6a3..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java +++ /dev/null @@ -1,387 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.webapp.hamlet; - -import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; -import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; - -import java.io.PrintWriter; -import java.util.EnumSet; -import static java.util.EnumSet.*; -import java.util.Iterator; - -import static org.apache.commons.text.StringEscapeUtils.*; -import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EOpt.*; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.WebAppException; - - -/** - * A simple unbuffered generic hamlet implementation. - * - * Zero copy but allocation on every element, which could be - * optimized to use a thread-local element pool. - * - * Prints HTML as it builds. So the order is important. - * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. 
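To make the "prints HTML as it builds, so the order is important" remark above concrete, here is a minimal usage sketch of a generated builder (the selector and text values are illustrative, not from this patch):

    // Sketch: elements are written in call order; _() closes the element
    // currently being built and returns its parent builder.
    Hamlet html = new Hamlet(new PrintWriter(System.out), 0, false);
    html.
      div("#content.main").        // <div id="content" class="main">
        h1("Cluster overview").    // <h1>Cluster overview</h1>
        p()._("42 nodes")._().     // <p>42 nodes</p>
      _();                         // </div>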
- */ -@Deprecated -@InterfaceAudience.Private -public class HamletImpl extends HamletSpec { - private static final String INDENT_CHARS = " "; - private static final Splitter SS = Splitter.on('.'). - omitEmptyStrings().trimResults(); - private static final Joiner SJ = Joiner.on(' '); - private static final Joiner CJ = Joiner.on(", "); - static final int S_ID = 0; - static final int S_CLASS = 1; - - int nestLevel; - int indents; // number of indent() called. mostly for testing. - private final PrintWriter out; - private final StringBuilder sb = new StringBuilder(); // not shared - private boolean wasInline = false; - - /** - * Element options. (whether it needs end tag, is inline etc.) - */ - public enum EOpt { - /** needs end(close) tag */ - ENDTAG, - /** The content is inline */ - INLINE, - /** The content is preformatted */ - PRE - }; - - /** - * The base class for elements - * @param type of the parent (containing) element for the element - */ - public class EImp implements _Child { - private final String name; - private final T parent; // short cut for parent element - private final EnumSet opts; // element options - - private boolean started = false; - private boolean attrsClosed = false; - - EImp(String name, T parent, EnumSet opts) { - this.name = name; - this.parent = parent; - this.opts = opts; - } - - @Override - public T _() { - closeAttrs(); - --nestLevel; - printEndTag(name, opts); - return parent; - } - - protected void _p(boolean quote, Object... args) { - closeAttrs(); - for (Object s : args) { - if (!opts.contains(PRE)) { - indent(opts); - } - out.print(quote ? escapeHtml4(String.valueOf(s)) - : String.valueOf(s)); - if (!opts.contains(INLINE) && !opts.contains(PRE)) { - out.println(); - } - } - } - - protected void _v(Class cls) { - closeAttrs(); - subView(cls); - } - - protected void closeAttrs() { - if (!attrsClosed) { - startIfNeeded(); - ++nestLevel; - out.print('>'); - if (!opts.contains(INLINE) && !opts.contains(PRE)) { - out.println(); - } - attrsClosed = true; - } - } - - protected void addAttr(String name, String value) { - checkState(!attrsClosed, "attribute added after content"); - startIfNeeded(); - printAttr(name, value); - } - - protected void addAttr(String name, Object value) { - addAttr(name, String.valueOf(value)); - } - - protected void addMediaAttr(String name, EnumSet media) { - // 6.13 comma-separated list - addAttr(name, CJ.join(media)); - } - - protected void addRelAttr(String name, EnumSet types) { - // 6.12 space-separated list - addAttr(name, SJ.join(types)); - } - - private void startIfNeeded() { - if (!started) { - printStartTag(name, opts); - started = true; - } - } - - protected void _inline(boolean choice) { - if (choice) { - opts.add(INLINE); - } else { - opts.remove(INLINE); - } - } - - protected void _endTag(boolean choice) { - if (choice) { - opts.add(ENDTAG); - } else { - opts.remove(ENDTAG); - } - } - - protected void _pre(boolean choice) { - if (choice) { - opts.add(PRE); - } else { - opts.remove(PRE); - } - } - } - - public class Generic extends EImp implements PCData { - Generic(String name, T parent, EnumSet opts) { - super(name, parent, opts); - } - - public Generic _inline() { - super._inline(true); - return this; - } - - public Generic _noEndTag() { - super._endTag(false); - return this; - } - - public Generic _pre() { - super._pre(true); - return this; - } - - public Generic _attr(String name, String value) { - addAttr(name, value); - return this; - } - - public Generic> _elem(String name, EnumSet opts) { - closeAttrs(); - 
return new Generic>(name, this, opts); - } - - public Generic> elem(String name) { - return _elem(name, of(ENDTAG)); - } - - @Override - public Generic _(Object... lines) { - _p(true, lines); - return this; - } - - @Override - public Generic _r(Object... lines) { - _p(false, lines); - return this; - } - } - - public HamletImpl(PrintWriter out, int nestLevel, boolean wasInline) { - this.out = out; - this.nestLevel = nestLevel; - this.wasInline = wasInline; - } - - public int nestLevel() { - return nestLevel; - } - - public boolean wasInline() { - return wasInline; - } - - public void setWasInline(boolean state) { - wasInline = state; - } - - public PrintWriter getWriter() { - return out; - } - - /** - * Create a root-level generic element. - * Mostly for testing purpose. - * @param type of the parent element - * @param name of the element - * @param opts {@link EOpt element options} - * @return the element - */ - public - Generic root(String name, EnumSet opts) { - return new Generic(name, null, opts); - } - - public Generic root(String name) { - return root(name, of(ENDTAG)); - } - - protected void printStartTag(String name, EnumSet opts) { - indent(opts); - sb.setLength(0); - out.print(sb.append('<').append(name).toString()); // for easier mock test - } - - protected void indent(EnumSet opts) { - if (opts.contains(INLINE) && wasInline) { - return; - } - if (wasInline) { - out.println(); - } - wasInline = opts.contains(INLINE) || opts.contains(PRE); - for (int i = 0; i < nestLevel; ++i) { - out.print(INDENT_CHARS); - } - ++indents; - } - - protected void printEndTag(String name, EnumSet opts) { - if (!opts.contains(ENDTAG)) { - return; - } - if (!opts.contains(PRE)) { - indent(opts); - } else { - wasInline = opts.contains(INLINE); - } - sb.setLength(0); - out.print(sb.append("').toString()); // ditto - if (!opts.contains(INLINE)) { - out.println(); - } - } - - protected void printAttr(String name, String value) { - sb.setLength(0); - sb.append(' ').append(name); - if (value != null) { - sb.append("=\"").append(escapeHtml4(value)).append("\""); - } - out.print(sb.toString()); - } - - /** - * Sub-classes should override this to do something interesting. - * @param cls the sub-view class - */ - protected void subView(Class cls) { - indent(of(ENDTAG)); // not an inline view - sb.setLength(0); - out.print(sb.append('[').append(cls.getName()).append(']').toString()); - out.println(); - } - - /** - * Parse selector into id and classes - * @param selector in the form of (#id)?(.class)* - * @return an two element array [id, "space-separated classes"]. - * Either element could be null. - * @throws WebAppException when both are null or syntax error. - */ - public static String[] parseSelector(String selector) { - String[] result = new String[]{null, null}; - Iterable rs = SS.split(selector); - Iterator it = rs.iterator(); - if (it.hasNext()) { - String maybeId = it.next(); - if (maybeId.charAt(0) == '#') { - result[S_ID] = maybeId.substring(1); - if (it.hasNext()) { - result[S_CLASS] = SJ.join(Iterables.skip(rs, 1)); - } - } else { - result[S_CLASS] = SJ.join(rs); - } - return result; - } - throw new WebAppException("Error parsing selector: "+ selector); - } - - /** - * Set id and/or class attributes for an element. 
- * @param type of the element - * @param e the element - * @param selector Haml form of "(#id)?(.class)*" - * @return the element - */ - public static E setSelector(E e, String selector) { - String[] res = parseSelector(selector); - if (res[S_ID] != null) { - e.$id(res[S_ID]); - } - if (res[S_CLASS] != null) { - e.$class(res[S_CLASS]); - } - return e; - } - - public static E setLinkHref(E e, String href) { - if (href.endsWith(".css")) { - e.$rel("stylesheet"); // required in html5 - } - e.$href(href); - return e; - } - - public static E setScriptSrc(E e, String src) { - if (src.endsWith(".js")) { - e.$type("text/javascript"); // required in html4 - } - e.$src(src); - return e; - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java deleted file mode 100644 index e3bb6d1add825..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java +++ /dev/null @@ -1,3103 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.webapp.hamlet; - -import java.lang.annotation.*; -import java.util.EnumSet; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.yarn.webapp.SubView; - -/** - * HTML5 compatible HTML4 builder interfaces. - * - *

- *     Generated from HTML 4.01 strict DTD and HTML5 diffs.
- *     cf. http://www.w3.org/TR/html4/
- *     cf. http://www.w3.org/TR/html5-diff/
- *
- *     The omitted attributes and elements (from the 4.01 DTD)
- * are for HTML5 compatibility.
- *
- *     Note, the common argument selector uses the same syntax as Haml/Sass:
- *
- *       selector ::= (#id)?(.class)*
- *
- * cf. http://haml-lang.com/
- *
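Read against the parseSelector/setSelector helpers removed from HamletImpl above, that grammar works out as follows (the selector string is illustrative):

    // Sketch: the selector is split on '.'; a leading "#x" token becomes
    // the id, and the remaining tokens are joined into the class attribute.
    String[] parts = HamletImpl.parseSelector("#logs.table.striped");
    // parts[S_ID]    -> "logs"
    // parts[S_CLASS] -> "table striped"
    // so setSelector(table(), "#logs.table.striped") renders
    // <table id="logs" class="table striped">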

    The naming convention used in this class is slightly different from - * normal classes. A CamelCase interface corresponds to an entity in the DTD. - * _CamelCase is for internal refactoring. An element builder interface is in - * UPPERCASE, corresponding to an element definition in the DTD. $lowercase is - * used as attribute builder methods to differentiate from element builder - * methods. - * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. - */ -@Deprecated -@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) -public class HamletSpec { - // The enum values are lowercase for better compression, - // while avoiding runtime conversion. - // cf. http://www.w3.org/Protocols/HTTP/Performance/Compression/HTMLCanon.html - // http://www.websiteoptimization.com/speed/tweak/lowercase/ - /** %Shape (case-insensitive) */ - public enum Shape { - /** - * rectangle - */ - rect, - /** - * circle - */ - circle, - /** - * polygon - */ - poly, - /** - * default - */ - Default - }; - - /** Values for the %18n dir attribute (case-insensitive) */ - public enum Dir { - /** - * left to right - */ - ltr, - /** - * right to left - */ - rtl - }; - - /** %MediaDesc (case-sensitive) */ - public enum Media { - /** - * computer screen - */ - screen, - /** - * teletype/terminal - */ - tty, - /** - * television - */ - tv, - /** - * projection - */ - projection, - /** - * mobile device - */ - handheld, - /** - * print media - */ - print, - /** - * braille - */ - braille, - /** - * aural - */ - aural, - /** - * suitable all media - */ - all - }; - - /** %LinkTypes (case-insensitive) */ - public enum LinkType { - /** - * - */ - alternate, - /** - * - */ - stylesheet, - /** - * - */ - start, - /** - * - */ - next, - /** - * - */ - prev, - /** - * - */ - contents, - /** - * - */ - index, - /** - * - */ - glossary, - /** - * - */ - copyright, - /** - * - */ - chapter, - /** - * - */ - section, - /** - * - */ - subsection, - /** - * - */ - appendix, - /** - * - */ - help, - /** - * - */ - bookmark - }; - - /** Values for form methods (case-insensitive) */ - public enum Method { - /** - * HTTP GET - */ - get, - /** - * HTTP POST - */ - post - }; - - /** %InputType (case-insensitive) */ - public enum InputType { - /** - * - */ - text, - /** - * - */ - password, - /** - * - */ - checkbox, - /** - * - */ - radio, - /** - * - */ - submit, - /** - * - */ - reset, - /** - * - */ - file, - /** - * - */ - hidden, - /** - * - */ - image, - /** - * - */ - button - }; - - /** Values for button types */ - public enum ButtonType { - /** - * - */ - button, - /** - * - */ - submit, - /** - * - */ - reset - }; - - /** %Scope (case-insensitive) */ - public enum Scope { - /** - * - */ - row, - /** - * - */ - col, - /** - * - */ - rowgroup, - /** - * - */ - colgroup - }; - - /** - * The element annotation for specifying element options other than - * attributes and allowed child elements - */ - @Target({ElementType.TYPE}) - @Retention(RetentionPolicy.RUNTIME) - public @interface Element { - /** - * Whether the start tag is required for the element. - * @return true if start tag is required - */ - boolean startTag() default true; - - /** - * Whether the end tag is required. - * @return true if end tag is required - */ - boolean endTag() default true; - } - - /** - * - */ - public interface _ {} - - /** - * - */ - public interface _Child extends _ { - /** - * Finish the current element. - * @return the parent element - */ - _ _(); - } - - /** - * - */ - public interface _Script { - /** - * Add a script element. 
- * @return a script element builder - */ - SCRIPT script(); - - /** - * Add a script element - * @param src uri of the script - * @return the current element builder - */ - _Script script(String src); - } - - /** - * - */ - public interface _Object { - /** - * Add an object element. - * @return an object element builder - */ - OBJECT object(); - - /** - * Add an object element. - * @param selector as #id.class etc. - * @return an object element builder - */ - OBJECT object(String selector); - } - - /** %head.misc */ - public interface HeadMisc extends _Script, _Object { - /** - * Add a style element. - * @return a style element builder - */ - STYLE style(); - - /** - * Add a css style element. - * @param lines content of the style sheet - * @return the current element builder - */ - HeadMisc style(Object... lines); - - /** - * Add a meta element. - * @return a meta element builder - */ - META meta(); - - /** - * Add a meta element. - * Shortcut of meta().$name(name).$content(content)._(); - * @param name of the meta element - * @param content of the meta element - * @return the current element builder - */ - HeadMisc meta(String name, String content); - - /** - * Add a meta element with http-equiv attribute. - * Shortcut of
    - * meta().$http_equiv(header).$content(content)._(); - * @param header for the http-equiv attribute - * @param content of the header - * @return the current element builder - */ - HeadMisc meta_http(String header, String content); - - /** - * Add a link element. - * @return a link element builder - */ - LINK link(); - - /** - * Add a link element. - * Implementation should try to figure out type by the suffix of href. - * So link("style.css"); is a shortcut of - * link().$rel("stylesheet").$type("text/css").$href("style.css")._(); - * - * @param href of the link - * @return the current element builder - */ - HeadMisc link(String href); - } - - /** %heading */ - public interface Heading { - /** - * Add an H1 element. - * @return a new H1 element builder - */ - H1 h1(); - - /** - * Add a complete H1 element. - * @param cdata the content of the element - * @return the current element builder - */ - Heading h1(String cdata); - - /** - * Add a complete H1 element - * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - Heading h1(String selector, String cdata); - - /** - * Add an H2 element. - * @return a new H2 element builder - */ - H2 h2(); - - /** - * Add a complete H2 element. - * @param cdata the content of the element - * @return the current element builder - */ - Heading h2(String cdata); - - /** - * Add a complete H1 element - * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - Heading h2(String selector, String cdata); - - /** - * Add an H3 element. - * @return a new H3 element builder - */ - H3 h3(); - - /** - * Add a complete H3 element. - * @param cdata the content of the element - * @return the current element builder - */ - Heading h3(String cdata); - - /** - * Add a complete H1 element - * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - Heading h3(String selector, String cdata); - - /** - * Add an H4 element. - * @return a new H4 element builder - */ - H4 h4(); - - /** - * Add a complete H4 element. - * @param cdata the content of the element - * @return the current element builder - */ - Heading h4(String cdata); - - /** - * Add a complete H4 element - * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - Heading h4(String selector, String cdata); - - /** - * Add an H5 element. - * @return a new H5 element builder - */ - H5 h5(); - - /** - * Add a complete H5 element. - * @param cdata the content of the element - * @return the current element builder - */ - Heading h5(String cdata); - - /** - * Add a complete H5 element - * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - Heading h5(String selector, String cdata); - - /** - * Add an H6 element. - * @return a new H6 element builder - */ - H6 h6(); - - /** - * Add a complete H6 element. - * @param cdata the content of the element - * @return the current element builder - */ - Heading h6(String cdata); - - /** - * Add a complete H6 element. 
- * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - Heading h6(String selector, String cdata); - } - - /** %list */ - public interface Listing { - - /** - * Add a UL (unordered list) element. - * @return a new UL element builder - */ - UL ul(); - - /** - * Add a UL (unordered list) element. - * @param selector the css selector in the form of (#id)?(.class)* - * @return a new UL element builder - */ - UL ul(String selector); - - /** - * Add a OL (ordered list) element. - * @return a new UL element builder - */ - OL ol(); - - /** - * Add a OL (ordered list) element. - * @param selector the css selector in the form of (#id)?(.class)* - * @return a new UL element builder - */ - OL ol(String selector); - } - - /** % preformatted */ - public interface Preformatted { - - /** - * Add a PRE (preformatted) element. - * @return a new PRE element builder - */ - PRE pre(); - - /** - * Add a PRE (preformatted) element. - * @param selector the css selector in the form of (#id)?(.class)* - * @return a new PRE element builder - */ - PRE pre(String selector); - } - - /** %coreattrs */ - public interface CoreAttrs { - /** document-wide unique id - * @param id the id - * @return the current element builder - */ - CoreAttrs $id(String id); - - /** space-separated list of classes - * @param cls the classes - * @return the current element builder - */ - CoreAttrs $class(String cls); - - /** associated style info - * @param style the style - * @return the current element builder - */ - CoreAttrs $style(String style); - - /** advisory title - * @param title the title - * @return the current element builder - */ - CoreAttrs $title(String title); - } - - /** %i18n */ - public interface I18nAttrs { - /** language code - * @param lang the code - * @return the current element builder - */ - I18nAttrs $lang(String lang); - - /** direction for weak/neutral text - * @param dir the {@link Dir} value - * @return the current element builder - */ - I18nAttrs $dir(Dir dir); - } - - /** %events */ - public interface EventsAttrs { - - /** a pointer button was clicked - * @param onclick the script - * @return the current element builder - */ - EventsAttrs $onclick(String onclick); - - /** a pointer button was double clicked - * @param ondblclick the script - * @return the current element builder - */ - EventsAttrs $ondblclick(String ondblclick); - - /** a pointer button was pressed down - * @param onmousedown the script - * @return the current element builder - */ - EventsAttrs $onmousedown(String onmousedown); - - /** a pointer button was released - * @param onmouseup the script - * @return the current element builder - */ - EventsAttrs $onmouseup(String onmouseup); - - /** a pointer was moved onto - * @param onmouseover the script - * @return the current element builder - */ - EventsAttrs $onmouseover(String onmouseover); - - /** a pointer was moved within - * @param onmousemove the script - * @return the current element builder - */ - EventsAttrs $onmousemove(String onmousemove); - - /** a pointer was moved away - * @param onmouseout the script - * @return the current element builder - */ - EventsAttrs $onmouseout(String onmouseout); - - /** a key was pressed and released - * @param onkeypress the script - * @return the current element builder - */ - EventsAttrs $onkeypress(String onkeypress); - - /** a key was pressed down - * @param onkeydown the script - * @return the current element builder - */ - EventsAttrs 
$onkeydown(String onkeydown); - - /** a key was released - * @param onkeyup the script - * @return the current element builder - */ - EventsAttrs $onkeyup(String onkeyup); - } - - /** %attrs */ - public interface Attrs extends CoreAttrs, I18nAttrs, EventsAttrs { - } - - /** Part of %pre.exclusion */ - public interface _FontSize extends _Child { - // BIG omitted cf. http://www.w3.org/TR/html5-diff/ - - /** - * Add a SMALL (small print) element - * @return a new SMALL element builder - */ - SMALL small(); - - /** - * Add a complete small (small print) element. - * Shortcut of: small()._(cdata)._(); - * @param cdata the content of the element - * @return the current element builder - */ - _FontSize small(String cdata); - - /** - * Add a complete small (small print) element. - * Shortcut of: small().$id(id).$class(class)._(cdata)._(); - * @param selector css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - _FontSize small(String selector, String cdata); - } - - /** %fontstyle -(%pre.exclusion) */ - public interface _FontStyle extends _Child { - // TT omitted - - /** - * Add an I (italic, alt voice/mood) element. - * @return the new I element builder - */ - I i(); - - /** - * Add a complete I (italic, alt voice/mood) element. - * @param cdata the content of the element - * @return the current element builder - */ - _FontStyle i(String cdata); - - /** - * Add a complete I (italic, alt voice/mood) element. - * @param selector the css selector in the form of (#id)?(.class)* - * @param cdata the content of the element - * @return the current element builder - */ - _FontStyle i(String selector, String cdata); - - /** - * Add a new B (bold/important) element. - * @return a new B element builder - */ - B b(); - - /** - * Add a complete B (bold/important) element. - * @param cdata the content - * @return the current element builder - */ - _FontStyle b(String cdata); - - /** - * Add a complete B (bold/important) element. - * @param selector the css select (#id)?(.class)* - * @param cdata the content - * @return the current element builder - */ - _FontStyle b(String selector, String cdata); - } - - /** %fontstyle */ - public interface FontStyle extends _FontStyle, _FontSize { - } - - /** %phrase */ - public interface Phrase extends _Child { - - /** - * Add an EM (emphasized) element. - * @return a new EM element builder - */ - EM em(); - - /** - * Add an EM (emphasized) element. - * @param cdata the content - * @return the current element builder - */ - Phrase em(String cdata); - - /** - * Add an EM (emphasized) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase em(String selector, String cdata); - - /** - * Add a STRONG (important) element. - * @return a new STRONG element builder - */ - STRONG strong(); - - /** - * Add a complete STRONG (important) element. - * @param cdata the content - * @return the current element builder - */ - Phrase strong(String cdata); - - /** - * Add a complete STRONG (important) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase strong(String selector, String cdata); - - /** - * Add a DFN element. - * @return a new DFN element builder - */ - DFN dfn(); - - /** - * Add a complete DFN element. 
- * @param cdata the content - * @return the current element builder - */ - Phrase dfn(String cdata); - - /** - * Add a complete DFN element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase dfn(String selector, String cdata); - - /** - * Add a CODE (code fragment) element. - * @return a new CODE element builder - */ - CODE code(); - - /** - * Add a complete CODE element. - * @param cdata the code - * @return the current element builder - */ - Phrase code(String cdata); - - /** - * Add a complete CODE element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the code - * @return the current element builder - */ - Phrase code(String selector, String cdata); - - /** - * Add a SAMP (sample) element. - * @return a new SAMP element builder - */ - SAMP samp(); - - /** - * Add a complete SAMP (sample) element. - * @param cdata the content - * @return the current element builder - */ - Phrase samp(String cdata); - - /** - * Add a complete SAMP (sample) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase samp(String selector, String cdata); - - /** - * Add a KBD (keyboard) element. - * @return a new KBD element builder - */ - KBD kbd(); - - /** - * Add a KBD (keyboard) element. - * @param cdata the content - * @return the current element builder - */ - Phrase kbd(String cdata); - - /** - * Add a KBD (keyboard) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase kbd(String selector, String cdata); - - /** - * Add a VAR (variable) element. - * @return a new VAR element builder - */ - VAR var(); - - /** - * Add a VAR (variable) element. - * @param cdata the content - * @return the current element builder - */ - Phrase var(String cdata); - - /** - * Add a VAR (variable) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase var(String selector, String cdata); - - /** - * Add a CITE element. - * @return a new CITE element builder - */ - CITE cite(); - - /** - * Add a CITE element. - * @param cdata the content - * @return the current element builder - */ - Phrase cite(String cdata); - - /** - * Add a CITE element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase cite(String selector, String cdata); - - /** - * Add an ABBR (abbreviation) element. - * @return a new ABBR element builder - */ - ABBR abbr(); - - /** - * Add a ABBR (abbreviation) element. - * @param cdata the content - * @return the current element builder - */ - Phrase abbr(String cdata); - - /** - * Add a ABBR (abbreviation) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - Phrase abbr(String selector, String cdata); - - // ACRONYM omitted, use ABBR - } - - /** Part of %pre.exclusion */ - public interface _ImgObject extends _Object, _Child { - - /** - * Add a IMG (image) element. - * @return a new IMG element builder - */ - IMG img(); - - /** - * Add a IMG (image) element. 
- * @param src the source URL of the image - * @return the current element builder - */ - _ImgObject img(String src); - } - - /** Part of %pre.exclusion */ - public interface _SubSup extends _Child { - - /** - * Add a SUB (subscript) element. - * @return a new SUB element builder - */ - SUB sub(); - - /** - * Add a complete SUB (subscript) element. - * @param cdata the content - * @return the current element builder - */ - _SubSup sub(String cdata); - - /** - * Add a complete SUB (subscript) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _SubSup sub(String selector, String cdata); - - /** - * Add a SUP (superscript) element. - * @return a new SUP element builder - */ - SUP sup(); - - /** - * Add a SUP (superscript) element. - * @param cdata the content - * @return the current element builder - */ - _SubSup sup(String cdata); - - /** - * Add a SUP (superscript) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _SubSup sup(String selector, String cdata); - } - - /** - * - */ - public interface _Anchor { - - /** - * Add a A (anchor) element. - * @return a new A element builder - */ - A a(); - - /** - * Add a A (anchor) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new A element builder - */ - A a(String selector); - - /** Shortcut for a().$href(href)._(anchorText)._(); - * @param href the URI - * @param anchorText for the URI - * @return the current element builder - */ - _Anchor a(String href, String anchorText); - - /** Shortcut for a(selector).$href(href)._(anchorText)._(); - * @param selector in the form of (#id)?(.class)* - * @param href the URI - * @param anchorText for the URI - * @return the current element builder - */ - _Anchor a(String selector, String href, String anchorText); - } - - /** - * INS and DEL are unusual for HTML - * "in that they may serve as either block-level or inline elements - * (but not both)". - *
    cf. http://www.w3.org/TR/html4/struct/text.html#h-9.4 - *
    cf. http://www.w3.org/TR/html5/edits.html#edits - */ - public interface _InsDel { - - /** - * Add an INS (insert) element. - * @return an INS element builder - */ - INS ins(); - - /** - * Add a complete INS element. - * @param cdata inserted data - * @return the current element builder - */ - _InsDel ins(String cdata); - - /** - * Add a DEL (delete) element. - * @return a DEL element builder - */ - DEL del(); - - /** - * Add a complete DEL element. - * @param cdata deleted data - * @return the current element builder - */ - _InsDel del(String cdata); - } - - /** %special -(A|%pre.exclusion) */ - public interface _Special extends _Script, _InsDel { - - /** - * Add a BR (line break) element. - * @return a new BR element builder - */ - BR br(); - - /** - * Add a BR (line break) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return the current element builder - */ - _Special br(String selector); - - /** - * Add a MAP element. - * @return a new MAP element builder - */ - MAP map(); - - /** - * Add a MAP element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new MAP element builder - */ - MAP map(String selector); - - /** - * Add a Q (inline quotation) element. - * @return a q (inline quotation) element builder - */ - Q q(); - - /** - * Add a complete Q element. - * @param cdata the content - * @return the current element builder - */ - _Special q(String cdata); - - /** - * Add a Q element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _Special q(String selector, String cdata); - - /** - * Add a SPAN element. - * @return a new SPAN element builder - */ - SPAN span(); - - /** - * Add a SPAN element. - * @param cdata the content - * @return the current element builder - */ - _Special span(String cdata); - - /** - * Add a SPAN element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _Special span(String selector, String cdata); - - /** - * Add a bdo (bidirectional override) element - * @return a bdo element builder - */ - BDO bdo(); - - /** - * Add a bdo (bidirectional override) element - * @param dir the direction of the text - * @param cdata the text - * @return the current element builder - */ - _Special bdo(Dir dir, String cdata); - } - - /** %special */ - public interface Special extends _Anchor, _ImgObject, _SubSup, _Special { - } - - /** - * - */ - public interface _Label extends _Child { - - /** - * Add a LABEL element. - * @return a new LABEL element builder - */ - LABEL label(); - - /** - * Add a LABEL element. - * Shortcut of label().$for(forId)._(cdata)._(); - * @param forId the for attribute - * @param cdata the content - * @return the current element builder - */ - _Label label(String forId, String cdata); - } - - /** - * - */ - public interface _FormCtrl { - - /** - * Add a INPUT element. - * @return a new INPUT element builder - */ - INPUT input(); - - /** - * Add a INPUT element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new INPUT element builder - */ - INPUT input(String selector); - - /** - * Add a SELECT element. - * @return a new SELECT element builder - */ - SELECT select(); - - /** - * Add a SELECT element. 
- * @param selector the css selector in the form of (#id)*(.class)* - * @return a new SELECT element builder - */ - SELECT select(String selector); - - /** - * Add a TEXTAREA element. - * @return a new TEXTAREA element builder - */ - TEXTAREA textarea(); - - /** - * Add a TEXTAREA element. - * @param selector - * @return a new TEXTAREA element builder - */ - TEXTAREA textarea(String selector); - - /** - * Add a complete TEXTAREA element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _FormCtrl textarea(String selector, String cdata); - - /** - * Add a BUTTON element. - * @return a new BUTTON element builder - */ - BUTTON button(); - - /** - * Add a BUTTON element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new BUTTON element builder - */ - BUTTON button(String selector); - - /** - * Add a complete BUTTON element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _FormCtrl button(String selector, String cdata); - } - - /** %formctrl */ - public interface FormCtrl extends _Label, _FormCtrl { - } - - /** - * - */ - public interface _Content extends _Child { - /** - * Content of the element - * @param lines of content - * @return the current element builder - */ - _Content _(Object... lines); - } - - /** - * - */ - public interface _RawContent extends _Child { - /** - * Raw (no need to be HTML escaped) content - * @param lines of content - * @return the current element builder - */ - _RawContent _r(Object... lines); - } - - /** #PCDATA */ - public interface PCData extends _Content, _RawContent { - } - - /** %inline */ - public interface Inline extends PCData, FontStyle, Phrase, Special, FormCtrl { - } - - /** - * - */ - public interface I extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface B extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface SMALL extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface EM extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface STRONG extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface DFN extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface CODE extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface SAMP extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface KBD extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface VAR extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface CITE extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface ABBR extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface ACRONYM extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface SUB extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface SUP extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface SPAN extends Attrs, Inline, _Child { - } - - /** The dir attribute is required for the BDO element */ - public interface BDO extends CoreAttrs, I18nAttrs, Inline, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface BR extends CoreAttrs, _Child { - } - - /** - * - */ - public interface _Form { - - /** - * Add a FORM element. - * @return a new FORM element builder - */ - FORM form(); - - /** - * Add a FORM element. 
- * @param selector the css selector in the form of (#id)*(.class)* - * @return a new FORM element builder - */ - FORM form(String selector); - } - - /** - * - */ - public interface _FieldSet { - - /** - * Add a FIELDSET element. - * @return a new FIELDSET element builder - */ - FIELDSET fieldset(); - - /** - * Add a FIELDSET element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new FIELDSET element builder - */ - FIELDSET fieldset(String selector); - } - - /** %block -(FORM|FIELDSET) */ - public interface _Block extends Heading, Listing, Preformatted { - - /** - * Add a P (paragraph) element. - * @return a new P element builder - */ - P p(); - - /** - * Add a P (paragraph) element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new P element builder - */ - P p(String selector); - - /** - * Add a DL (description list) element. - * @return a new DL element builder - */ - DL dl(); - - /** - * Add a DL element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new DL element builder - */ - DL dl(String selector); - - /** - * Add a DIV element. - * @return a new DIV element builder - */ - DIV div(); - - /** - * Add a DIV element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new DIV element builder - */ - DIV div(String selector); - - // NOSCRIPT omitted - // cf. http://www.w3.org/html/wg/tracker/issues/117 - - /** - * Add a BLOCKQUOTE element. - * @return a new BLOCKQUOTE element builder - */ - BLOCKQUOTE blockquote(); - - /** - * Alias of blockquote - * @return a new BLOCKQUOTE element builder - */ - BLOCKQUOTE bq(); - - /** - * Add a HR (horizontal rule) element. - * @return a new HR element builder - */ - HR hr(); - - /** - * Add a HR element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new HR element builder - */ - _Block hr(String selector); - - /** - * Add a TABLE element. - * @return a new TABLE element builder - */ - TABLE table(); - - /** - * Add a TABLE element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new TABLE element builder - */ - TABLE table(String selector); - - /** - * Add a ADDRESS element. - * @return a new ADDRESS element builder - */ - ADDRESS address(); - - /** - * Add a complete ADDRESS element. - * @param cdata the content - * @return the current element builder - */ - _Block address(String cdata); - - /** - * Embed a sub-view. - * @param cls the sub-view class - * @return the current element builder - */ - _Block _(Class cls); - } - - /** %block */ - public interface Block extends _Block, _Form, _FieldSet { - } - - /** %flow */ - public interface Flow extends Block, Inline { - } - - /** - * - */ - public interface _Body extends Block, _Script, _InsDel { - } - - /** - * - */ - public interface BODY extends Attrs, _Body, _Child { - - /** - * The document has been loaded. - * @param script to invoke - * @return the current element builder - */ - BODY $onload(String script); - - /** - * The document has been removed - * @param script to invoke - * @return the current element builder - */ - BODY $onunload(String script); - } - - /** - * - */ - public interface ADDRESS extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface DIV extends Attrs, Flow, _Child { - } - - /** - * - */ - public interface A extends Attrs, _Child, /* %inline -(A) */ - PCData, FontStyle, Phrase, _ImgObject, _Special, - _SubSup, FormCtrl { - // $charset omitted. 
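As a usage note, the _Anchor shortcuts defined earlier in this spec make the two forms below equivalent (the div builder, URL, and anchor text are illustrative):

    // Sketch: shortcut vs. expanded form, per the _Anchor javadoc
    // "Shortcut for a().$href(href)._(anchorText)._();"
    div.a("http://example.org/jobhistory", "job history");      // shortcut
    div.a().$href("http://example.org/jobhistory")
        ._("job history")._();                                  // expanded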
- - /** advisory content type - * @param cdata the content-type - * @return the current element builder - */ - A $type(String cdata); - - // $name omitted. use id instead. - /** URI for linked resource - * @param uri the URI - * @return the current element builder - */ - A $href(String uri); - - /** language code - * @param cdata the code - * @return the current element builder - */ - A $hreflang(String cdata); - - /** forward link types - * @param linkTypes the types - * @return the current element builder - */ - A $rel(EnumSet linkTypes); - - /** - * forward link types - * @param linkTypes space-separated list of link types - * @return the current element builder. - */ - A $rel(String linkTypes); - - // $rev omitted. Instead of rev="made", use rel="author" - - /** accessibility key character - * @param cdata the key - * @return the current element builder - */ - A $accesskey(String cdata); - - // $shape and coords omitted. use area instead of a for image maps. - /** position in tabbing order - * @param index the index - * @return the current element builder - */ - A $tabindex(int index); - - /** the element got the focus - * @param script to invoke - * @return the current element builder - */ - A $onfocus(String script); - - /** the element lost the focus - * @param script to invoke - * @return the current element builder - */ - A $onblur(String script); - } - - /** - * - */ - public interface MAP extends Attrs, Block, _Child { - - /** - * Add a AREA element. - * @return a new AREA element builder - */ - AREA area(); - - /** - * Add a AREA element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new AREA element builder - */ - AREA area(String selector); - - /** for reference by usemap - * @param name of the map - * @return the current element builder - */ - MAP $name(String name); - } - - /** - * - */ - @Element(endTag=false) - public interface AREA extends Attrs, _Child { - - /** controls interpretation of coords - * @param shape of the area - * @return the current element builder - */ - AREA $shape(Shape shape); - - /** comma-separated list of lengths - * @param cdata coords of the area - * @return the current element builder - */ - AREA $coords(String cdata); - - /** URI for linked resource - * @param uri the URI - * @return the current element builder - */ - AREA $href(String uri); - - // $nohref omitted./ - /** short description - * @param desc the description - * @return the current element builder - */ - AREA $alt(String desc); - - /** position in tabbing order - * @param index of the order - * @return the current element builder - */ - AREA $tabindex(int index); - - /** accessibility key character - * @param cdata the key - * @return the current element builder - */ - AREA $accesskey(String cdata); - - /** the element got the focus - * @param script to invoke - * @return the current element builder - */ - AREA $onfocus(String script); - - /** the element lost the focus - * @param script to invoke - * @return the current element builder - */ - AREA $onblur(String script); - } - - /** - * - */ - @Element(endTag=false) - public interface LINK extends Attrs, _Child { - // $charset omitted - /** URI for linked resource - * @param uri the URI - * @return the current element builder - */ - LINK $href(String uri); - - /** language code - * @param cdata the code - * @return the current element builder - */ - LINK $hreflang(String cdata); - - /** advisory content type - * @param cdata the type - * @return the current element builder - */ - LINK 
$type(String cdata); - - /** forward link types - * @param linkTypes the types - * @return the current element builder - */ - LINK $rel(EnumSet linkTypes); - - /** - * forward link types. - * @param linkTypes space-separated link types - * @return the current element builder - */ - LINK $rel(String linkTypes); - - // $rev omitted. Instead of rev="made", use rel="author" - - /** for rendering on these media - * @param mediaTypes the media types - * @return the current element builder - */ - LINK $media(EnumSet mediaTypes); - - /** - * for rendering on these media. - * @param mediaTypes comma-separated list of media - * @return the current element builder - */ - LINK $media(String mediaTypes); - } - - /** - * - */ - @Element(endTag=false) - public interface IMG extends Attrs, _Child { - - /** URI of image to embed - * @param uri the URI - * @return the current element builder - */ - IMG $src(String uri); - - /** short description - * @param desc the description - * @return the current element builder - */ - IMG $alt(String desc); - - // $longdesc omitted. use instead - // $name omitted. use id instead. - - /** override height - * @param pixels the height - * @return the current element builder - */ - IMG $height(int pixels); - - /** - * override height - * @param cdata the height (can use %, * etc.) - * @return the current element builder - */ - IMG $height(String cdata); - - /** override width - * @param pixels the width - * @return the current element builder - */ - IMG $width(int pixels); - - /** - * override width - * @param cdata the width (can use %, * etc.) - * @return the current element builder - */ - IMG $width(String cdata); - - /** use client-side image map - * @param uri the URI - * @return the current element builder - */ - IMG $usemap(String uri); - - /** use server-side image map - * @return the current element builder - */ - IMG $ismap(); - } - - /** - * - */ - public interface _Param extends _Child { - - /** - * Add a PARAM (parameter) element. - * @return a new PARAM element builder - */ - PARAM param(); - - /** - * Add a PARAM element. - * Shortcut of param().$name(name).$value(value)._(); - * @param name of the value - * @param value the value - * @return the current element builder - */ - _Param param(String name, String value); - } - - /** - * - */ - public interface OBJECT extends Attrs, _Param, Flow, _Child { - // $declare omitted. repeat element completely - - // $archive, classid, codebase, codetype ommited. use data and type - - /** reference to object's data - * @param uri the URI - * @return the current element builder - */ - OBJECT $data(String uri); - - /** content type for data - * @param contentType the type - * @return the current element builder - */ - OBJECT $type(String contentType); - - // $standby omitted. fix the resource instead. 
- - /** override height - * @param pixels the height - * @return the current element builder - */ - OBJECT $height(int pixels); - - /** - * override height - * @param length the height (can use %, *) - * @return the current element builder - */ - OBJECT $height(String length); - - /** override width - * @param pixels the width - * @return the current element builder - */ - OBJECT $width(int pixels); - - /** - * override width - * @param length the height (can use %, *) - * @return the current element builder - */ - OBJECT $width(String length); - - /** use client-side image map - * @param uri the URI/name of the map - * @return the current element builder - */ - OBJECT $usemap(String uri); - - /** submit as part of form - * @param cdata the name of the object - * @return the current element builder - */ - OBJECT $name(String cdata); - - /** position in tabbing order - * @param index of the order - * @return the current element builder - */ - OBJECT $tabindex(int index); - } - - /** - * - */ - @Element(endTag=false) - public interface PARAM { - - /** document-wide unique id - * @param cdata the id - * @return the current element builder - */ - PARAM $id(String cdata); - - /** property name. Required. - * @param cdata the name - * @return the current element builder - */ - PARAM $name(String cdata); - - /** property value - * @param cdata the value - * @return the current element builder - */ - PARAM $value(String cdata); - - // $type and valuetype omitted - } - - /** - * - */ - @Element(endTag=false) - public interface HR extends Attrs, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface P extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface H1 extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface H2 extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface H3 extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface H4 extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface H5 extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface H6 extends Attrs, Inline, _Child { - } - - /** - * - */ - public interface PRE extends Attrs, _Child, /* (%inline;)* -(%pre.exclusion) */ - PCData, _FontStyle, Phrase, _Anchor, _Special, - FormCtrl { - } - - /** - * - */ - public interface Q extends Attrs, Inline, _Child { - - /** URI for source document or msg - * @param uri the URI - * @return the current element builder - */ - Q $cite(String uri); - } - - /** - * - */ - public interface BLOCKQUOTE extends Attrs, Block, _Script, _Child { - - /** URI for source document or msg - * @param uri the URI - * @return the current element builder - */ - BLOCKQUOTE $cite(String uri); - } - - /** - * @see _InsDel INS/DEL quirks. - */ - public interface INS extends Attrs, Flow, _Child { - /** info on reason for change - * @param uri - * @return the current element builder - */ - INS $cite(String uri); - - /** date and time of change - * @param datetime - * @return the current element builder - */ - INS $datetime(String datetime); - } - - /** - * @see _InsDel INS/DEL quirks. - */ - public interface DEL extends Attrs, Flow, _Child { - /** info on reason for change - * @param uri the info URI - * @return the current element builder - */ - DEL $cite(String uri); - - /** date and time of change - * @param datetime the time - * @return the current element builder - */ - DEL $datetime(String datetime); - } - - /** - * - */ - public interface _Dl extends _Child { - - /** - * Add a DT (term of the item) element. 
- * @return a new DT element builder - */ - DT dt(); - - /** - * Add a complete DT element. - * @param cdata the content - * @return the current element builder - */ - _Dl dt(String cdata); - - /** - * Add a DD (definition/description) element. - * @return a new DD element builder - */ - DD dd(); - - /** - * Add a complete DD element. - * @param cdata the content - * @return the current element builder - */ - _Dl dd(String cdata); - } - - /** - * - */ - public interface DL extends Attrs, _Dl, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface DT extends Attrs, Inline, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface DD extends Attrs, Flow, _Child { - } - - /** - * - */ - public interface _Li extends _Child { - - /** - * Add a LI (list item) element. - * @return a new LI element builder - */ - LI li(); - - /** - * Add a LI element. - * @param cdata the content - * @return the current element builder - */ - _Li li(String cdata); - } - - /** - * - */ - public interface OL extends Attrs, _Li, _Child { - } - - /** - * - */ - public interface UL extends Attrs, _Li, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface LI extends Attrs, Flow, _Child { - } - - /** - * - */ - public interface FORM extends Attrs, _Child, /* (%block;|SCRIPT)+ -(FORM) */ - _Script, _Block, _FieldSet { - /** server-side form handler - * @param uri - * @return the current element builder - */ - FORM $action(String uri); - - /** HTTP method used to submit the form - * @param method - * @return the current element builder - */ - FORM $method(Method method); - - /** - * contentype for "POST" method. - * The default is "application/x-www-form-urlencoded". - * Use "multipart/form-data" for input type=file - * @param enctype - * @return the current element builder - */ - FORM $enctype(String enctype); - - /** list of MIME types for file upload - * @param cdata - * @return the current element builder - */ - FORM $accept(String cdata); - - /** name of form for scripting - * @param cdata - * @return the current element builder - */ - FORM $name(String cdata); - - /** the form was submitted - * @param script - * @return the current element builder - */ - FORM $onsubmit(String script); - - /** the form was reset - * @param script - * @return the current element builder - */ - FORM $onreset(String script); - - /** (space and/or comma separated) list of supported charsets - * @param cdata - * @return the current element builder - */ - FORM $accept_charset(String cdata); - } - - /** - * - */ - public interface LABEL extends Attrs, _Child, /* (%inline;)* -(LABEL) */ - PCData, FontStyle, Phrase, Special, _FormCtrl { - /** matches field ID value - * @param cdata - * @return the current element builder - */ - LABEL $for(String cdata); - - /** accessibility key character - * @param cdata - * @return the current element builder - */ - LABEL $accesskey(String cdata); - - /** the element got the focus - * @param script - * @return the current element builder - */ - LABEL $onfocus(String script); - - /** the element lost the focus - * @param script - * @return the current element builder - */ - LABEL $onblur(String script); - } - - /** - * - */ - @Element(endTag=false) - public interface INPUT extends Attrs, _Child { - /** what kind of widget is needed. default is "text". 
- * @param inputType - * @return the current element builder - */ - INPUT $type(InputType inputType); - - /** submit as part of form - * @param cdata - * @return the current element builder - */ - INPUT $name(String cdata); - - /** Specify for radio buttons and checkboxes - * @param cdata - * @return the current element builder - */ - INPUT $value(String cdata); - - /** for radio buttons and check boxes - * @return the current element builder - */ - INPUT $checked(); - - /** unavailable in this context - * @return the current element builder - */ - INPUT $disabled(); - - /** for text and passwd - * @return the current element builder - */ - INPUT $readonly(); - - /** specific to each type of field - * @param cdata - * @return the current element builder - */ - INPUT $size(String cdata); - - /** max chars for text fields - * @param length - * @return the current element builder - */ - INPUT $maxlength(int length); - - /** for fields with images - * @param uri - * @return the current element builder - */ - INPUT $src(String uri); - - /** short description - * @param cdata - * @return the current element builder - */ - INPUT $alt(String cdata); - - // $usemap omitted. use img instead of input for image maps. - /** use server-side image map - * @return the current element builder - */ - INPUT $ismap(); - - /** position in tabbing order - * @param index - * @return the current element builder - */ - INPUT $tabindex(int index); - - /** accessibility key character - * @param cdata - * @return the current element builder - */ - INPUT $accesskey(String cdata); - - /** the element got the focus - * @param script - * @return the current element builder - */ - INPUT $onfocus(String script); - - /** the element lost the focus - * @param script - * @return the current element builder - */ - INPUT $onblur(String script); - - /** some text was selected - * @param script - * @return the current element builder - */ - INPUT $onselect(String script); - - /** the element value was changed - * @param script - * @return the current element builder - */ - INPUT $onchange(String script); - - /** list of MIME types for file upload (csv) - * @param contentTypes - * @return the current element builder - */ - INPUT $accept(String contentTypes); - } - - /** - * - */ - public interface _Option extends _Child { - /** - * Add a OPTION element. - * @return a new OPTION element builder - */ - OPTION option(); - - /** - * Add a complete OPTION element. - * @param cdata the content - * @return the current element builder - */ - _Option option(String cdata); - } - - /** - * - */ - public interface SELECT extends Attrs, _Option, _Child { - /** - * Add a OPTGROUP element. 
- * @return a new OPTGROUP element builder - */ - OPTGROUP optgroup(); - - /** field name - * @param cdata - * @return the current element builder - */ - SELECT $name(String cdata); - - /** rows visible - * @param rows - * @return the current element builder - */ - SELECT $size(int rows); - - /** default is single selection - * @return the current element builder - */ - SELECT $multiple(); - - /** unavailable in this context - * @return the current element builder - */ - SELECT $disabled(); - - /** position in tabbing order - * @param index - * @return the current element builder - */ - SELECT $tabindex(int index); - - /** the element got the focus - * @param script - * @return the current element builder - */ - SELECT $onfocus(String script); - - /** the element lost the focus - * @param script - * @return the current element builder - */ - SELECT $onblur(String script); - - /** the element value was changed - * @param script - * @return the current element builder - */ - SELECT $onchange(String script); - } - - /** - * - */ - public interface OPTGROUP extends Attrs, _Option, _Child { - /** unavailable in this context - * @return the current element builder - */ - OPTGROUP $disabled(); - - /** for use in hierarchical menus - * @param cdata - * @return the current element builder - */ - OPTGROUP $label(String cdata); - } - - /** - * - */ - @Element(endTag=false) - public interface OPTION extends Attrs, PCData, _Child { - /** currently selected option - * @return the current element builder - */ - OPTION $selected(); - - /** unavailable in this context - * @return the current element builder - */ - OPTION $disabled(); - - /** for use in hierarchical menus - * @param cdata - * @return the current element builder - */ - OPTION $label(String cdata); - - /** defaults to element content - * @param cdata - * @return the current element builder - */ - OPTION $value(String cdata); - } - - /** - * - */ - public interface TEXTAREA extends Attrs, PCData, _Child { - /** variable name for the text - * @param cdata - * @return the current element builder - */ - TEXTAREA $name(String cdata); - - /** visible rows - * @param rows - * @return the current element builder - */ - TEXTAREA $rows(int rows); - - /** visible columns - * @param cols - * @return the current element builder - */ - TEXTAREA $cols(int cols); - - /** unavailable in this context - * @return the current element builder - */ - TEXTAREA $disabled(); - - /** text is readonly - * @return the current element builder - */ - TEXTAREA $readonly(); - - /** position in tabbing order - * @param index - * @return the current element builder - */ - TEXTAREA $tabindex(int index); - - /** accessibility key character - * @param cdata - * @return the current element builder - */ - TEXTAREA $accesskey(String cdata); - - /** the element got the focus - * @param script - * @return the current element builder - */ - TEXTAREA $onfocus(String script); - - /** the element lost the focus - * @param script - * @return the current element builder - */ - TEXTAREA $onblur(String script); - - /** some text was selected - * @param script - * @return the current element builder - */ - TEXTAREA $onselect(String script); - - /** the element value was changed - * @param script - * @return the current element builder - */ - TEXTAREA $onchange(String script); - } - - /** - * - */ - public interface _Legend extends _Child { - /** - * Add a LEGEND element. - * @return a new LEGEND element builder - */ - LEGEND legend(); - - /** - * Add a LEGEND element. 
- * @param cdata - * @return the current element builder - */ - _Legend legend(String cdata); - } - - /** - * - */ - public interface FIELDSET extends Attrs, _Legend, PCData, Flow, _Child { - } - - /** - * - */ - public interface LEGEND extends Attrs, Inline, _Child { - /** accessibility key character - * @param cdata - * @return the current element builder - */ - LEGEND $accesskey(String cdata); - } - - /** - * - */ - public interface BUTTON extends /* (%flow;)* -(A|%formctrl|FORM|FIELDSET) */ - _Block, PCData, FontStyle, Phrase, _Special, _ImgObject, _SubSup, Attrs { - /** name of the value - * @param cdata - * @return the current element builder - */ - BUTTON $name(String cdata); - - /** sent to server when submitted - * @param cdata - * @return the current element builder - */ - BUTTON $value(String cdata); - - /** for use as form button - * @param type - * @return the current element builder - */ - BUTTON $type(ButtonType type); - - /** unavailable in this context - * @return the current element builder - */ - BUTTON $disabled(); - - /** position in tabbing order - * @param index - * @return the current element builder - */ - BUTTON $tabindex(int index); - - /** accessibility key character - * @param cdata - * @return the current element builder - */ - BUTTON $accesskey(String cdata); - - /** the element got the focus - * @param script - * @return the current element builder - */ - BUTTON $onfocus(String script); - - /** the element lost the focus - * @param script - * @return the current element builder - */ - BUTTON $onblur(String script); - } - - /** - * - */ - public interface _TableRow { - /** - * Add a TR (table row) element. - * @return a new TR element builder - */ - TR tr(); - - /** - * Add a TR element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new TR element builder - */ - TR tr(String selector); - } - - /** - * - */ - public interface _TableCol extends _Child { - /** - * Add a COL element. - * @return a new COL element builder - */ - COL col(); - - /** - * Add a COL element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return the current element builder - */ - _TableCol col(String selector); - } - - /** - * - */ - public interface _Table extends _TableRow, _TableCol { - /** - * Add a CAPTION element. - * @return a new CAPTION element builder - */ - CAPTION caption(); - - /** - * Add a CAPTION element. - * @param cdata - * @return the current element builder - */ - _Table caption(String cdata); - - /** - * Add a COLGROPU element. - * @return a new COLGROUP element builder - */ - COLGROUP colgroup(); - - /** - * Add a THEAD element. - * @return a new THEAD element builder - */ - THEAD thead(); - - /** - * Add a THEAD element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new THEAD element builder - */ - THEAD thead(String selector); - - /** - * Add a TFOOT element. - * @return a new TFOOT element builder - */ - TFOOT tfoot(); - - /** - * Add a TFOOT element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new TFOOT element builder - */ - TFOOT tfoot(String selector); - - /** - * Add a tbody (table body) element. - * Must be after thead/tfoot and no tr at the same level. - * @return a new tbody element builder - */ - TBODY tbody(); - - /** - * Add a TBODY element. 
- * @param selector the css selector in the form of (#id)*(.class)* - * @return a new TBODY element builder - */ - TBODY tbody(String selector); - - // $summary, width, border, frame, rules, cellpadding, cellspacing omitted - // use css instead - } - /** - * TBODY should be used after THEAD/TFOOT, iff there're no TABLE.TR elements. - */ - public interface TABLE extends Attrs, _Table, _Child { - } - - /** - * - */ - public interface CAPTION extends Attrs, Inline, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface THEAD extends Attrs, _TableRow, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface TFOOT extends Attrs, _TableRow, _Child { - } - - /** - * - */ - public interface TBODY extends Attrs, _TableRow, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface COLGROUP extends Attrs, _TableCol, _Child { - /** default number of columns in group. default: 1 - * @param cols - * @return the current element builder - */ - COLGROUP $span(int cols); - - // $width omitted. use css instead. - } - - /** - * - */ - @Element(endTag=false) - public interface COL extends Attrs, _Child { - /** COL attributes affect N columns. default: 1 - * @param cols - * @return the current element builder - */ - COL $span(int cols); - // $width omitted. use css instead. - } - - /** - * - */ - public interface _Tr extends _Child { - /** - * Add a TH element. - * @return a new TH element builder - */ - TH th(); - - /** - * Add a complete TH element. - * @param cdata the content - * @return the current element builder - */ - _Tr th(String cdata); - - /** - * Add a TH element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _Tr th(String selector, String cdata); - - /** - * Add a TD element. - * @return a new TD element builder - */ - TD td(); - - /** - * Add a TD element. - * @param cdata the content - * @return the current element builder - */ - _Tr td(String cdata); - - /** - * Add a TD element. - * @param selector the css selector in the form of (#id)*(.class)* - * @param cdata the content - * @return the current element builder - */ - _Tr td(String selector, String cdata); - } - - /** - * - */ - @Element(endTag=false) - public interface TR extends Attrs, _Tr, _Child { - } - - /** - * - */ - public interface _Cell extends Attrs, Flow, _Child { - // $abbr omited. begin cell text with terse text instead. - // use $title for elaberation, when appropriate. - // $axis omitted. use scope. - /** space-separated list of id's for header cells - * @param cdata - * @return the current element builder - */ - _Cell $headers(String cdata); - - /** scope covered by header cells - * @param scope - * @return the current element builder - */ - _Cell $scope(Scope scope); - - /** number of rows spanned by cell. default: 1 - * @param rows - * @return the current element builder - */ - _Cell $rowspan(int rows); - - /** number of cols spanned by cell. default: 1 - * @param cols - * @return the current element builder - */ - _Cell $colspan(int cols); - } - - /** - * - */ - @Element(endTag=false) - public interface TH extends _Cell { - } - - /** - * - */ - @Element(endTag=false) - public interface TD extends _Cell { - } - - /** - * - */ - public interface _Head extends HeadMisc { - /** - * Add a TITLE element. - * @return a new TITLE element builder - */ - TITLE title(); - - /** - * Add a TITLE element. 
- * @param cdata the content - * @return the current element builder - */ - _Head title(String cdata); - - /** - * Add a BASE element. - * @return a new BASE element builder - */ - BASE base(); - - /** - * Add a complete BASE element. - * @param uri - * @return the current element builder - */ - _Head base(String uri); - } - - /** - * - */ - public interface HEAD extends I18nAttrs, _Head, _Child { - // $profile omitted - } - - /** - * - */ - public interface TITLE extends I18nAttrs, PCData, _Child { - } - - /** - * - */ - @Element(endTag=false) - public interface BASE extends _Child { - /** URI that acts as base URI - * @param uri - * @return the current element builder - */ - BASE $href(String uri); - } - - /** - * - */ - @Element(endTag=false) - public interface META extends I18nAttrs, _Child { - /** HTTP response header name - * @param header - * @return the current element builder - */ - META $http_equiv(String header); - - /** metainformation name - * @param name - * @return the current element builder - */ - META $name(String name); - - /** associated information - * @param cdata - * @return the current element builder - */ - META $content(String cdata); - - // $scheme omitted - } - - /** - * - */ - public interface STYLE extends I18nAttrs, _Content, _Child { - /** content type of style language - * @param cdata - * @return the current element builder - */ - STYLE $type(String cdata); - - /** designed for use with these media - * @param media - * @return the current element builder - */ - STYLE $media(EnumSet media); - - /** advisory title - * @param cdata - * @return the current element builder - */ - STYLE $title(String cdata); - } - - /** - * - */ - public interface SCRIPT extends _Content, _Child { - /** char encoding of linked resource - * @param cdata - * @return the current element builder - */ - SCRIPT $charset(String cdata); - - /** content type of script language - * @param cdata - * @return the current element builder - */ - SCRIPT $type(String cdata); - - /** URI for an external script - * @param cdata - * @return the current element builder - */ - SCRIPT $src(String cdata); - - /** UA may defer execution of script - * @param cdata - * @return the current element builder - */ - SCRIPT $defer(String cdata); - } - - /** - * - */ - public interface _Html extends _Head, _Body, _ { - /** - * Add a HEAD element. - * @return a new HEAD element builder - */ - HEAD head(); - - /** - * Add a BODY element. - * @return a new BODY element builder - */ - BODY body(); - - /** - * Add a BODY element. - * @param selector the css selector in the form of (#id)*(.class)* - * @return a new BODY element builder - */ - BODY body(String selector); - } - - // There is only one HEAD and BODY, in that order. - /** - * The root element - */ - public interface HTML extends I18nAttrs, _Html { - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java deleted file mode 100644 index 4d0cf4950c5df..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Deprecated. - * Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. - */ -@Deprecated -@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) -package org.apache.hadoop.yarn.webapp.hamlet; -import org.apache.hadoop.classification.InterfaceAudience; - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java deleted file mode 100644 index 3b6de66eedab4..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestParseSelector.java +++ /dev/null @@ -1,57 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -package org.apache.hadoop.yarn.webapp.hamlet; - -import org.junit.Test; -import static org.junit.Assert.*; - -import org.apache.hadoop.yarn.webapp.WebAppException; - -import static org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.*; - -public class TestParseSelector { - - @Test public void testNormal() { - String[] res = parseSelector("#id.class"); - assertEquals("id", res[S_ID]); - assertEquals("class", res[S_CLASS]); - } - - @Test public void testMultiClass() { - String[] res = parseSelector("#id.class1.class2"); - assertEquals("id", res[S_ID]); - assertEquals("class1 class2", res[S_CLASS]); - } - - @Test public void testMissingId() { - String[] res = parseSelector(".class"); - assertNull(res[S_ID]); - assertEquals("class", res[S_CLASS]); - } - - @Test public void testMissingClass() { - String[] res = parseSelector("#id"); - assertEquals("id", res[S_ID]); - assertNull(res[S_CLASS]); - } - - @Test(expected=WebAppException.class) public void testMissingAll() { - parseSelector(""); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestHamlet.java similarity index 65% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestHamlet.java index d9eaa37b3c323..275b64cb79d61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestHamlet.java @@ -1,33 +1,31 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.webapp.hamlet; +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.webapp.hamlet2; import java.util.EnumSet; import java.io.PrintWriter; import org.junit.Test; -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import static org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.*; +import static org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.*; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.*; public class TestHamlet { @@ -36,11 +34,11 @@ public class TestHamlet { title("test"). h1("heading 1"). p("#id.class"). - b("hello"). - em("world!")._(). + b("hello"). + em("world!").__(). div("#footer"). - _("Brought to you by"). - a("http://hostname/", "Somebody")._(); + __("Brought to you by"). + a("https://hostname/", "Somebody").__(); PrintWriter out = h.getWriter(); out.flush(); @@ -64,7 +62,7 @@ public class TestHamlet { verify(out).print(" id=\"footer\""); verify(out).print("Brought to you by"); verify(out).print(""); verify(out).print("

    "); @@ -79,9 +77,9 @@ public class TestHamlet { TABLE t = h.table("#id"); for (int i = 0; i < 3; ++i) { - t.tr().td("1").td("2")._(); + t.tr().td("1").td("2").__(); } - t._(); + t.__(); PrintWriter out = h.getWriter(); out.flush(); @@ -97,12 +95,12 @@ public class TestHamlet { meta_http("Content-type", "text/html; charset=utf-8"). title("test enum attrs"). link().$rel("stylesheet"). - $media(EnumSet.of(Media.screen, Media.print)). - $type("text/css").$href("style.css")._(). + $media(EnumSet.of(Media.screen, Media.print)). + $type("text/css").$href("style.css").__(). link().$rel(EnumSet.of(LinkType.index, LinkType.start)). - $href("index.html")._(); + $href("index.html").__(); - h.div("#content")._("content")._(); + h.div("#content").__("content").__(); PrintWriter out = h.getWriter(); out.flush(); @@ -126,12 +124,12 @@ public class TestHamlet { @Test public void testPreformatted() { Hamlet h = newHamlet(). div(). - i("inline before pre"). - pre(). - _("pre text1\npre text2"). - i("inline in pre"). - _("pre text after inline")._(). - i("inline after pre")._(); + i("inline before pre"). + pre(). + __("pre text1\npre text2"). + i("inline in pre"). + __("pre text after inline").__(). + i("inline after pre").__(); PrintWriter out = h.getWriter(); out.flush(); @@ -149,8 +147,8 @@ static class TestView2 implements SubView { @Test public void testSubViews() { Hamlet h = newHamlet(). title("test sub-views"). - div("#view1")._(TestView1.class)._(). - div("#view2")._(TestView2.class)._(); + div("#view1").__(TestView1.class).__(). + div("#view2").__(TestView2.class).__(); PrintWriter out = h.getWriter(); out.flush(); @@ -163,4 +161,4 @@ static Hamlet newHamlet() { PrintWriter out = spy(new PrintWriter(System.out)); return new Hamlet(out, 0, false); } -} +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestHamletImpl.java similarity index 55% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestHamletImpl.java index 9a1efcf84f1af..3b19aa30c3d11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet/TestHamletImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestHamletImpl.java @@ -1,30 +1,29 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -package org.apache.hadoop.yarn.webapp.hamlet; +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.webapp.hamlet2; import java.io.PrintWriter; import org.junit.Test; -import static org.junit.Assert.*; + +import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.*; -import org.apache.hadoop.yarn.webapp.hamlet.HamletImpl; -import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.*; +import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.*; public class TestHamletImpl { /** @@ -35,25 +34,25 @@ public class TestHamletImpl { PrintWriter out = spy(new PrintWriter(System.out)); HamletImpl hi = new HamletImpl(out, 0, false); hi. - root("start")._attr("name", "value"). - _("start text"). + root("start")._attr("name", "value"). + __("start text"). elem("sub")._attr("name", "value"). - _("sub text")._(). + __("sub text").__(). elem("sub1")._noEndTag()._attr("boolean", null). - _("sub1text")._(). - _("start text2"). + __("sub1text").__(). + __("start text2"). elem("pre")._pre(). - _("pre text"). - elem("i")._inline()._("inline")._()._(). - elem("i")._inline()._("inline after pre")._(). - _("start text3"). + __("pre text"). + elem("i")._inline().__("inline").__().__(). + elem("i")._inline().__("inline after pre").__(). + __("start text3"). elem("sub2"). - _("sub2text")._(). + __("sub2text").__(). elem("sub3")._noEndTag(). - _("sub3text")._(). + __("sub3text").__(). elem("sub4")._noEndTag(). - elem("i")._inline()._("inline")._(). - _("sub4text")._()._(); + elem("i")._inline().__("inline").__(). + __("sub4text").__().__(); out.flush(); assertEquals(0, hi.nestLevel); @@ -75,11 +74,11 @@ public class TestHamletImpl { verify(e).$class("class"); H1 t = mock(H1.class); - HamletImpl.setSelector(t, "#id.class")._("heading"); + HamletImpl.setSelector(t, "#id.class").__("heading"); verify(t).$id("id"); verify(t).$class("class"); - verify(t)._("heading"); + verify(t).__("heading"); } @Test public void testSetLinkHref() { @@ -105,4 +104,4 @@ public class TestHamletImpl { verifyNoMoreInteractions(script); } -} +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestParseSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestParseSelector.java new file mode 100644 index 0000000000000..e2141e6942c09 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/hamlet2/TestParseSelector.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.webapp.hamlet2; + +import org.junit.Test; + +import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import org.apache.hadoop.yarn.webapp.WebAppException; + +public class TestParseSelector { + + @Test public void testNormal() { + String[] res = parseSelector("#id.class"); + assertEquals("id", res[S_ID]); + assertEquals("class", res[S_CLASS]); + } + + @Test public void testMultiClass() { + String[] res = parseSelector("#id.class1.class2"); + assertEquals("id", res[S_ID]); + assertEquals("class1 class2", res[S_CLASS]); + } + + @Test public void testMissingId() { + String[] res = parseSelector(".class"); + assertNull(res[S_ID]); + assertEquals("class", res[S_CLASS]); + } + + @Test public void testMissingClass() { + String[] res = parseSelector("#id"); + assertEquals("id", res[S_ID]); + assertNull(res[S_CLASS]); + } + + @Test(expected=WebAppException.class) public void testMissingAll() { + parseSelector(""); + } +} \ No newline at end of file From dc09e55b38c4268b53f9cbff7f1af2a0b7d00338 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 21 May 2021 18:04:24 +0200 Subject: [PATCH 337/666] YARN-10779. Add option to disable lowercase conversion in GetApplicationsRequestPBImpl and ApplicationSubmissionContextPBImpl. 
Contributed by Peter Bacsko --- .../hadoop/yarn/conf/YarnConfiguration.java | 5 +- .../impl/pb/GetApplicationsRequestPBImpl.java | 28 ++++++- .../ApplicationSubmissionContextPBImpl.java | 30 +++++++- .../src/main/resources/yarn-default.xml | 6 ++ .../pb/TestGetApplicationsRequestPBImpl.java | 74 +++++++++++++++++++ ...estApplicationSubmissionContextPBImpl.java | 74 +++++++++++++++++++ 6 files changed, 211 insertions(+), 6 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 833aeccd0bdf7..2ebf79cfae3f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1954,7 +1954,10 @@ public static boolean isAclEnabled(Configuration conf) { false; public static final String APPLICATION_TAG_BASED_PLACEMENT_USER_WHITELIST = APPLICATION_TAG_BASED_PLACEMENT_PREFIX + ".username.whitelist"; - + public static final String APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION = + APPLICATION_TAG_BASED_PLACEMENT_PREFIX + ".force-lowercase"; + public static final boolean DEFAULT_APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION = + true; /** Enable switch for container log monitoring. 
*/ public static final String NM_CONTAINER_LOG_MONITOR_ENABLED = NM_PREFIX + "container-log-monitor.enable"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index 32b576aeec176..97f4ad0338cbd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION; + import java.util.EnumSet; import java.util.HashSet; import java.util.List; @@ -25,6 +28,7 @@ import org.apache.commons.lang3.Range; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; @@ -33,12 +37,14 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder; - +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.TextFormat; @Private @Unstable public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { + private static volatile Boolean forceLowerCaseTags; + GetApplicationsRequestProto proto = GetApplicationsRequestProto.getDefaultInstance(); GetApplicationsRequestProto.Builder builder = null; boolean viaProto = false; @@ -56,11 +62,23 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { public GetApplicationsRequestPBImpl() { builder = GetApplicationsRequestProto.newBuilder(); + initLowerCaseConfig(); } public GetApplicationsRequestPBImpl(GetApplicationsRequestProto proto) { this.proto = proto; viaProto = true; + initLowerCaseConfig(); + } + + private static void initLowerCaseConfig() { + if (forceLowerCaseTags == null) { + Configuration conf = new Configuration(); + + forceLowerCaseTags = + conf.getBoolean(APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION, + DEFAULT_APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION); + } } public synchronized GetApplicationsRequestProto getProto() { @@ -210,7 +228,8 @@ public synchronized void setApplicationTags(Set tags) { // Convert applicationTags to lower case and add this.applicationTags = new HashSet(); for (String tag : tags) { - this.applicationTags.add(StringUtils.toLowerCase(tag)); + this.applicationTags.add( + forceLowerCaseTags ? 
StringUtils.toLowerCase(tag) : tag); } } @@ -406,4 +425,9 @@ public boolean equals(Object other) { public String toString() { return TextFormat.shortDebugString(getProto()); } + + @VisibleForTesting + static void setForceLowerCaseTags(boolean convert) { + GetApplicationsRequestPBImpl.forceLowerCaseTags = convert; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 5f9a71bd442b3..3eb139b18ca6f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.api.records.impl.pb; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -29,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -50,13 +54,15 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnProtos.StringStringMapProto; - +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.TextFormat; @Private @Unstable public class ApplicationSubmissionContextPBImpl extends ApplicationSubmissionContext { + private static volatile Boolean forceLowerCaseTags; + ApplicationSubmissionContextProto proto = ApplicationSubmissionContextProto.getDefaultInstance(); ApplicationSubmissionContextProto.Builder builder = null; @@ -75,14 +81,26 @@ public class ApplicationSubmissionContextPBImpl public ApplicationSubmissionContextPBImpl() { builder = ApplicationSubmissionContextProto.newBuilder(); + initLowerCaseConfig(); } public ApplicationSubmissionContextPBImpl( ApplicationSubmissionContextProto proto) { this.proto = proto; viaProto = true; + initLowerCaseConfig(); } - + + private static void initLowerCaseConfig() { + if (forceLowerCaseTags == null) { + Configuration conf = new Configuration(); + + forceLowerCaseTags = + conf.getBoolean(APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION, + DEFAULT_APPLICATION_TAG_FORCE_LOWERCASE_CONVERSION); + } + } + public synchronized ApplicationSubmissionContextProto getProto() { mergeLocalToProto(); proto = viaProto ? proto : builder.build(); @@ -287,7 +305,8 @@ public synchronized void setApplicationTags(Set tags) { // Convert applicationTags to lower case and add this.applicationTags = new TreeSet<>(); for (String tag : tags) { - this.applicationTags.add(StringUtils.toLowerCase(tag)); + this.applicationTags.add( + forceLowerCaseTags ? 
StringUtils.toLowerCase(tag) : tag); } } @@ -719,4 +738,9 @@ public synchronized void setApplicationSchedulingPropertiesMap( this.schedulingProperties.clear(); this.schedulingProperties.putAll(schedulingPropertyMap); } + + @VisibleForTesting + static void setForceLowerCaseTags(boolean convert) { + ApplicationSubmissionContextPBImpl.forceLowerCaseTags = convert; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 0e9c00beeadc5..b64efba5dca35 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -4773,4 +4773,10 @@ yarn.resourcemanager.application.max-tag.length 100 + + + Specifies whether application tags should be converted to lowercase or not. + yarn.resourcemanager.application-tag-based-placement.force-lowercase + true + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java new file mode 100644 index 0000000000000..ff9c778139fe5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class TestGetApplicationsRequestPBImpl { + @Parameter + @SuppressWarnings("checkstyle:visibilitymodifier") + public GetApplicationsRequestPBImpl impl; + + @Test + public void testAppTagsLowerCaseConversionDefault() { + impl.setApplicationTags(Sets.newHashSet("ABcd", "efgH")); + impl.getApplicationTags().forEach(s -> + assertEquals(s, s.toLowerCase())); + } + + @Test + public void testAppTagsLowerCaseConversionDisabled() { + GetApplicationsRequestPBImpl.setForceLowerCaseTags(false); + impl.setApplicationTags(Sets.newHashSet("ABcd", "efgH")); + impl.getApplicationTags().forEach(s -> + assertNotEquals(s, s.toLowerCase())); + } + + @Test + public void testAppTagsLowerCaseConversionEnabled() { + GetApplicationsRequestPBImpl.setForceLowerCaseTags(true); + impl.setApplicationTags(Sets.newHashSet("ABcd", "efgH")); + impl.getApplicationTags().forEach(s -> + assertEquals(s, s.toLowerCase())); + } + + @Parameters + public static Collection data() { + List list = new ArrayList<>(); + list.add(new Object[] {new GetApplicationsRequestPBImpl()}); + list.add(new Object[] {new GetApplicationsRequestPBImpl( + GetApplicationsRequestProto.newBuilder().build())}); + + return list; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java new file mode 100644 index 0000000000000..a0d8bb3d69250 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class TestApplicationSubmissionContextPBImpl { + @Parameter + @SuppressWarnings("checkstyle:visibilitymodifier") + public ApplicationSubmissionContextPBImpl impl; + + @Test + public void testAppTagsLowerCaseConversionDefault() { + impl.setApplicationTags(Sets.newHashSet("ABcd", "efgH")); + impl.getApplicationTags().forEach(s -> + assertEquals(s, s.toLowerCase())); + } + + @Test + public void testAppTagsLowerCaseConversionDisabled() { + ApplicationSubmissionContextPBImpl.setForceLowerCaseTags(false); + impl.setApplicationTags(Sets.newHashSet("ABcd", "efgH")); + impl.getApplicationTags().forEach(s -> + assertNotEquals(s, s.toLowerCase())); + } + + @Test + public void testAppTagsLowerCaseConversionEnabled() { + ApplicationSubmissionContextPBImpl.setForceLowerCaseTags(true); + impl.setApplicationTags(Sets.newHashSet("ABcd", "efgH")); + impl.getApplicationTags().forEach(s -> + assertEquals(s, s.toLowerCase())); + } + + @Parameters + public static Collection data() { + List list = new ArrayList<>(); + list.add(new Object[] {new ApplicationSubmissionContextPBImpl()}); + list.add(new Object[] {new ApplicationSubmissionContextPBImpl( + ApplicationSubmissionContextProto.newBuilder().build())}); + + return list; + } +} From 346b14a3b59e389f84092382ba0cf8df592fe320 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Fri, 21 May 2021 18:07:32 +0200 Subject: [PATCH 338/666] YARN-10766. [UI2] Bump moment-timezone to 0.5.33. 
Contributed by Andras Gyori --- .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json | 2 +- .../hadoop-yarn-ui/src/main/webapp/config/environment.js | 4 ++++ .../hadoop-yarn-ui/src/main/webapp/ember-cli-build.js | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json index 9b5cc9fb9fec7..0ef1367c24d19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json @@ -13,7 +13,7 @@ "qunit": "1.19.0", "jquery-ui": "1.11.4", "moment": "2.12.0", - "moment-timezone": "0.5.1", + "moment-timezone": "0.5.33", "more-js": "0.8.2", "bootstrap": "3.3.6", "d3": "~3.5.6", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/environment.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/environment.js index 3ff3c02984a2f..c6ed5ed3a0926 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/environment.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/environment.js @@ -33,6 +33,10 @@ module.exports = function(environment) { } }, + moment: { + includeTimezone: 'all' + }, + APP: DEFAULT_CONFIG, contentSecurityPolicy: { 'connect-src': "* 'self'", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js index c4bb95f0e7a30..09da39b4ec8f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js @@ -42,7 +42,7 @@ module.exports = function(defaults) { app.import("bower_components/datatables/media/css/jquery.dataTables.min.css"); app.import("bower_components/datatables/media/js/jquery.dataTables.min.js"); app.import("bower_components/momentjs/min/moment.min.js"); - app.import("bower_components/moment-timezone/builds/moment-timezone-with-data-2010-2020.min.js"); + app.import("bower_components/moment-timezone/builds/moment-timezone-with-data-10-year-range.min.js"); app.import("bower_components/select2/dist/css/select2.min.css"); app.import("bower_components/select2/dist/js/select2.min.js"); app.import('bower_components/jquery-ui/jquery-ui.js'); From 9cd7e90a60a038aa5ded7779f044cb4ad1c77e34 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Fri, 21 May 2021 09:26:50 -0700 Subject: [PATCH 339/666] HADOOP-17718. Explicitly set locale in the Dockerfile. 
(#3034) Reviewed-by: Akira Ajisaka --- dev-support/docker/Dockerfile | 4 ++++ dev-support/docker/Dockerfile_aarch64 | 4 ++++ dev-support/docker/Dockerfile_centos_7 | 4 ++++ dev-support/docker/Dockerfile_centos_8 | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index 51743039e02d2..ef2f1562613e8 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -85,6 +85,10 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +RUN locale-gen en_US.UTF-8 +ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' +ENV PYTHONIOENCODING=utf-8 + ###### # Set env vars required to build Hadoop ###### diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 362c5466a4a14..710c989c7b624 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -89,6 +89,10 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +RUN locale-gen en_US.UTF-8 +ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' +ENV PYTHONIOENCODING=utf-8 + ###### # Set env vars required to build Hadoop ###### diff --git a/dev-support/docker/Dockerfile_centos_7 b/dev-support/docker/Dockerfile_centos_7 index bf95835e8194d..319942e2f6d4a 100644 --- a/dev-support/docker/Dockerfile_centos_7 +++ b/dev-support/docker/Dockerfile_centos_7 @@ -93,6 +93,10 @@ RUN mkdir -p /opt/zstd /tmp/zstd \ && make install \ && cd /root || exit +RUN locale-gen en_US.UTF-8 +ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' +ENV PYTHONIOENCODING=utf-8 + ###### # Set env vars required to build Hadoop ###### diff --git a/dev-support/docker/Dockerfile_centos_8 b/dev-support/docker/Dockerfile_centos_8 index ddf60b7fc049c..7f1f6b4ac2239 100644 --- a/dev-support/docker/Dockerfile_centos_8 +++ b/dev-support/docker/Dockerfile_centos_8 @@ -110,6 +110,10 @@ RUN mkdir -p /opt/zstd /tmp/zstd \ && make install \ && cd /root || exit +RUN locale-gen en_US.UTF-8 +ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' +ENV PYTHONIOENCODING=utf-8 + ###### # Set env vars required to build Hadoop ###### From df8c752999165209abbc0f387f5b567a02d6b6dc Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sat, 22 May 2021 15:36:58 +0530 Subject: [PATCH 340/666] HADOOP-17700. 
ExitUtil#halt info log should log HaltException (#3015) Reviewed-by: Steve Loughran Reviewed-by: Wei-Chiu Chuang --- .../src/main/java/org/apache/hadoop/util/ExitUtil.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java index dbe6663b2e715..32f4b5b7a72d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java @@ -232,8 +232,7 @@ public static synchronized void halt(HaltException ee) throws HaltException { try { if (status != 0) { //exit indicates a problem, log it - LOG.debug("Halt with status {}: {}", status, msg, ee); - LOG.info("Halt with status {}: {}", status, msg, msg); + LOG.info("Halt with status {}: {}", status, msg, ee); } } catch (Exception ignored) { // ignore exceptions here, as it may be due to an out of memory situation From bca8a739e9cb13681bf7278f73dbd2e0613c4730 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Sat, 22 May 2021 14:55:01 +0200 Subject: [PATCH 341/666] YARN-7769. FS QueueManager should not create default queue at init. Contributed by Benjamin Teke --- .../scheduler/fair/QueueManager.java | 4 -- .../applicationsmanager/TestAMRestart.java | 10 +++- .../scheduler/fair/TestAppRunnability.java | 6 +- .../scheduler/fair/TestFSLeafQueue.java | 2 +- .../scheduler/fair/TestFairScheduler.java | 58 ++++++++++++------- .../fair/TestFairSchedulerFairShare.java | 3 + .../scheduler/fair/TestQueueManager.java | 7 ++- .../fair/converter/TestFSQueueConverter.java | 2 +- .../TestRMWebServicesFairScheduler.java | 2 +- ...vicesFairSchedulerCustomResourceTypes.java | 5 +- 10 files changed, 60 insertions(+), 39 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index d620077ca0198..62f63bbbdd133 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -107,10 +107,6 @@ public void initialize() { rootQueue.setDynamic(false); queues.put(rootQueue.getName(), rootQueue); - // Create the default queue - FSLeafQueue defaultQueue = - getLeafQueue(YarnConfiguration.DEFAULT_QUEUE_NAME, true); - defaultQueue.setDynamic(false); // Recursively reinitialize to propagate queue properties rootQueue.reinit(true); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index 46311e2ab9d75..b2af776aa568f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -1313,8 +1314,13 @@ public void testQueueResourceDoesNotLeak() throws Exception { Assert.assertEquals(0, queue.getQueueResourceUsage().getUsed().getVirtualCores()); } else if (getSchedulerType() == SchedulerType.FAIR) { - FSLeafQueue queue = ((FairScheduler) scheduler).getQueueManager() - .getLeafQueue("root.default", false); + // The default queue is not auto created after YARN-7769 so + // user-as-default-queue option is used + Collection queues = ((FairScheduler) scheduler) + .getQueueManager().getLeafQueues(); + Assert.assertEquals(1, queues.size()); + + FSLeafQueue queue = queues.iterator().next(); Assert.assertEquals(0, queue.getResourceUsage().getMemorySize()); Assert.assertEquals(0, queue.getResourceUsage().getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAppRunnability.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAppRunnability.java index 9777e14197027..be685eebc8d5d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAppRunnability.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAppRunnability.java @@ -125,9 +125,8 @@ public void testAppAdditionAndRemoval() { new AppAttemptAddedSchedulerEvent(createAppAttemptId(1, 1), false); scheduler.handle(attemptAddedEvent); - // Scheduler should have two queues (the default and the one created for - // user1) - assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); + // Scheduler should have one queue (the one created for user1) + assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); // That queue should have one app assertEquals(1, scheduler.getQueueManager().getLeafQueue("user1", true) @@ -174,6 +173,7 @@ public void testDontAllowUndeclaredPools() { AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("jerry").build()) + .addQueue(new AllocationFileQueue.Builder("default").build()) .writeToFile(ALLOC_FILE); // Restarting resource manager since the file location and content is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java index 5af9bc3acdfef..61122833f12de 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java @@ -147,7 +147,7 @@ public void test() { createSchedulingRequest(1 * 1024, "queueB", "user1"); scheduler.update(); Collection queues = scheduler.getQueueManager().getLeafQueues(); - assertEquals(3, queues.size()); + assertEquals(2, queues.size()); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 290b95498d373..a2ed6daea763f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -1074,22 +1074,22 @@ public void testHierarchicalQueuesSimilarParents() throws IOException { QueueManager queueManager = scheduler.getQueueManager(); FSLeafQueue leafQueue = queueManager.getLeafQueue("parent.child", true); - Assert.assertEquals(2, queueManager.getLeafQueues().size()); + Assert.assertEquals(1, queueManager.getLeafQueues().size()); Assert.assertNotNull(leafQueue); Assert.assertEquals("root.parent.child", leafQueue.getName()); FSLeafQueue leafQueue2 = queueManager.getLeafQueue("parent", true); Assert.assertNull(leafQueue2); - Assert.assertEquals(2, queueManager.getLeafQueues().size()); + Assert.assertEquals(1, queueManager.getLeafQueues().size()); FSLeafQueue leafQueue3 = queueManager.getLeafQueue("parent.child.grandchild", true); Assert.assertNull(leafQueue3); - Assert.assertEquals(2, queueManager.getLeafQueues().size()); + Assert.assertEquals(1, queueManager.getLeafQueues().size()); FSLeafQueue leafQueue4 = queueManager.getLeafQueue("parent.sister", true); Assert.assertNotNull(leafQueue4); Assert.assertEquals("root.parent.sister", leafQueue4.getName()); - Assert.assertEquals(3, queueManager.getLeafQueues().size()); + Assert.assertEquals(2, queueManager.getLeafQueues().size()); } @Test @@ -1771,8 +1771,8 @@ public void testEmptyQueueName() throws Exception { scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); - // only default queue - assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); + // no queue by default + assertEquals(0, scheduler.getQueueManager().getLeafQueues().size()); // Submit app with empty queue // Submit fails before we reach the placement check. 
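Since this change stops the scheduler from creating root.default automatically at init, any configuration or test that still relies on that queue has to declare it in the allocation file itself, which is what the later hunks of this patch do. A minimal sketch of such a declaration using the AllocationFileWriter/AllocationFileQueue helpers that appear in this patch; the import paths, class name and ALLOC_FILE path below are assumptions for illustration, not part of the patch:

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile.AllocationFileQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile.AllocationFileWriter;

public class DeclareDefaultQueueSketch {
  // Hypothetical location; the tests touched by this patch use their own ALLOC_FILE constant.
  private static final String ALLOC_FILE = "/tmp/fair-scheduler-test.xml";

  public static void main(String[] args) {
    // Declare root.default explicitly, mirroring the allocation-file hunks added in this patch,
    // because the queue is no longer created automatically by QueueManager.initialize().
    AllocationFileWriter.create()
        .fairDefaultQueueSchedulingPolicy()
        .addQueue(new AllocationFileQueue.Builder("root")
            .schedulingPolicy("drf")
            .subQueue(new AllocationFileQueue.Builder("default")
                .weight(1).build())
            .build())
        .writeToFile(ALLOC_FILE);
  }
}
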
@@ -1783,7 +1783,7 @@ public void testEmptyQueueName() throws Exception { scheduler.handle(appAddedEvent); // submission rejected - assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(0, scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApp(appAttemptId)); assertEquals(0, resourceManager.getRMContext().getRMApps().size()); } @@ -1794,8 +1794,8 @@ public void testQueueuNameWithPeriods() throws Exception { scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); - // only default queue - assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); + // no queue by default + assertEquals(0, scheduler.getQueueManager().getLeafQueues().size()); // Submit app with queue name (.A) // Submit fails before we reach the placement check. @@ -1805,7 +1805,7 @@ public void testQueueuNameWithPeriods() throws Exception { "user1"); scheduler.handle(appAddedEvent1); // submission rejected - assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(0, scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApp(appAttemptId1)); assertEquals(0, resourceManager.getRMContext().getRMApps().size()); @@ -1817,7 +1817,7 @@ public void testQueueuNameWithPeriods() throws Exception { "user1"); scheduler.handle(appAddedEvent2); // submission rejected - assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(0, scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApp(appAttemptId2)); assertEquals(0, resourceManager.getRMContext().getRMApps().size()); @@ -1829,7 +1829,7 @@ public void testQueueuNameWithPeriods() throws Exception { "user1", new ApplicationPlacementContext("A.B")); scheduler.handle(appAddedEvent3); // submission accepted - assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApp(appAttemptId3)); assertEquals(0, resourceManager.getRMContext().getRMApps().size()); } @@ -1863,7 +1863,7 @@ public void testFairShareWithMinAlloc() throws Exception { scheduler.update(); Collection queues = scheduler.getQueueManager().getLeafQueues(); - assertEquals(3, queues.size()); + assertEquals(2, queues.size()); for (FSLeafQueue p : queues) { if (p.getName().equals("root.queueA")) { @@ -1944,6 +1944,8 @@ public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception { .fairDefaultQueueSchedulingPolicy() .addQueue(new AllocationFileQueue.Builder("root") .schedulingPolicy("drf") + .subQueue(new AllocationFileQueue.Builder("default") + .weight(1).build()) .subQueue(new AllocationFileQueue.Builder("child1") .weight(1).build()) .subQueue(new AllocationFileQueue.Builder("child2") @@ -1981,6 +1983,8 @@ public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception { .fairDefaultQueueSchedulingPolicy() .addQueue(new AllocationFileQueue.Builder("root") .schedulingPolicy("drf") + .subQueue(new AllocationFileQueue.Builder("default") + .weight(1).build()) .subQueue(new AllocationFileQueue.Builder("child1") .weight(1).build()) .subQueue(new AllocationFileQueue.Builder("child2") @@ -2015,6 +2019,17 @@ public void testSteadyFairShareWithQueueCreatedRuntime() throws Exception { conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, SimpleGroupsMapping.class, GroupMappingServiceProvider.class); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true"); + 
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + AllocationFileWriter.create() + .fairDefaultQueueSchedulingPolicy() + .addQueue(new AllocationFileQueue.Builder("root") + .schedulingPolicy("drf") + .subQueue(new AllocationFileQueue.Builder("default") + .weight(1).build()) + .build()) + .writeToFile(ALLOC_FILE); + scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); @@ -2129,13 +2144,12 @@ public void testHierarchicalQueueAllocationFileParsing() throws IOException { QueueManager queueManager = scheduler.getQueueManager(); Collection leafQueues = queueManager.getLeafQueues(); - Assert.assertEquals(4, leafQueues.size()); + Assert.assertEquals(3, leafQueues.size()); Assert.assertNotNull(queueManager.getLeafQueue("queueA", false)); Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueC", false)); Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueD", false)); - Assert.assertNotNull(queueManager.getLeafQueue("default", false)); // Make sure querying for queues didn't create any new ones: - Assert.assertEquals(4, leafQueues.size()); + Assert.assertEquals(3, leafQueues.size()); } @Test @@ -4736,8 +4750,8 @@ public void testQueueNameWithTrailingSpace() throws Exception { scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); - // only default queue - assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); + // no queue by default + assertEquals(0, scheduler.getQueueManager().getLeafQueues().size()); // submit app with queue name "A" ApplicationAttemptId appAttemptId1 = createAppAttemptId(1, 1); @@ -4747,7 +4761,7 @@ public void testQueueNameWithTrailingSpace() throws Exception { appAttemptId1.getApplicationId(), "A", "user1", apc); scheduler.handle(appAddedEvent1); // submission accepted - assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); assertNotNull(scheduler.getSchedulerApplications().get(appAttemptId1. getApplicationId())); @@ -4771,7 +4785,7 @@ public void testQueueNameWithTrailingSpace() throws Exception { // expected ignore: rules should have filtered this out } // submission rejected - assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(1, scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApplications().get(appAttemptId2. getApplicationId())); assertNull(scheduler.getSchedulerApp(appAttemptId2)); @@ -4783,7 +4797,7 @@ public void testQueueNameWithTrailingSpace() throws Exception { appAttemptId3.getApplicationId(), "B.C", "user1", apc); scheduler.handle(appAddedEvent3); // submission accepted - assertEquals(3, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); assertNotNull(scheduler.getSchedulerApplications().get(appAttemptId3. getApplicationId())); @@ -4806,7 +4820,7 @@ public void testQueueNameWithTrailingSpace() throws Exception { // expected ignore: rules should have filtered this out } // submission rejected - assertEquals(3, scheduler.getQueueManager().getLeafQueues().size()); + assertEquals(2, scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApplications().get(appAttemptId4. 
getApplicationId())); assertNull(scheduler.getSchedulerApp(appAttemptId4)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java index 5ab920f3e608c..db612f23aad5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java @@ -68,6 +68,9 @@ private void createClusterWithQueuesAndOneNode(int mem, int vCores, String policy) { AllocationFileWriter allocationFileWriter = AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("root") + .subQueue(new AllocationFileQueue.Builder("default") + .weight(1) + .build()) .subQueue(new AllocationFileQueue.Builder("parentA") .weight(8) .subQueue(new AllocationFileQueue.Builder("childA1").build()) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java index 1bad2b0e060b4..190259b9f29de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java @@ -116,10 +116,11 @@ public void testReloadTurnsLeafQueueIntoParent() { assertNotNull(queueManager.getLeafQueue("queue1.queue2", false)); assertNull(queueManager.getLeafQueue("queue1", false)); - // Should never to be able to create a queue under the default queue + // Since YARN-7769 FS doesn't create the default queue during init, so + // it should be possible to create a queue under the root.default queue updateConfiguredLeafQueues(queueManager, "default.queue3"); - assertNull(queueManager.getLeafQueue("default.queue3", false)); - assertNotNull(queueManager.getLeafQueue("default", false)); + assertNotNull(queueManager.getLeafQueue("default.queue3", false)); + assertNull(queueManager.getLeafQueue("default", false)); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java index e648c1c91cd5e..cd562fb4502e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java @@ -151,7 +151,7 @@ public void testConvertQueueHierarchy() { converter.convertQueueHierarchy(rootQueue); // root children - assertEquals("root children", "default,admins,users,misc", + assertEquals("root children", "admins,users,misc,default", csConfig.get(PREFIX + "root.queues")); // root.admins children diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairScheduler.java index 58c72eee28c69..bf605e9f5f669 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairScheduler.java @@ -132,7 +132,7 @@ public void testClusterSchedulerWithSubQueues() JSONObject json = response.getEntity(JSONObject.class); JSONArray subQueueInfo = json.getJSONObject("scheduler") .getJSONObject("schedulerInfo").getJSONObject("rootQueue") - .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1) + .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(0) .getJSONObject("childQueues").getJSONArray("queue"); // subQueueInfo is consist of subqueue1 and subqueue2 info assertEquals(2, subQueueInfo.length()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java index 1eed3b34ff7cd..7e9d11cc7bed8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java @@ -220,8 +220,9 @@ private void verifyJsonResponse(WebResource path, ClientResponse response, .getJSONObject("schedulerInfo").getJSONObject("rootQueue") .getJSONObject("childQueues").getJSONArray("queue"); - // childQueueInfo consists of subqueue1 and subqueue2 info - assertEquals(2, queues.length()); + assertEquals(1, queues.length()); + + // firstChildQueue info contains subqueue1 and subqueue2 info JSONObject firstChildQueue = queues.getJSONObject(0); new FairSchedulerJsonVerifications(customResourceTypes) .verify(firstChildQueue); From cf3156fa8009b3e22cb2bf7256ecaa45b29382b8 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Sat, 22 May 2021 15:00:08 +0200 
Subject: [PATCH 342/666] YARN-10753. Document the removal of FS default queue creation. Contributed by Benjamin Teke --- .../hadoop-yarn-site/src/site/markdown/FairScheduler.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md index 7a11279fe4d2b..f0a43a55098d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md @@ -63,7 +63,7 @@ Customizing the Fair Scheduler typically involves altering two files. First, sch | Property | Description | |:---- |:---- | | `yarn.scheduler.fair.allocation.file` | Path to allocation file. An allocation file is an XML manifest describing queues and their properties, in addition to certain policy defaults. This file must be in the XML format described in the next section. If a relative path is given, the file is searched for on the classpath (which typically includes the Hadoop conf directory). Defaults to fair-scheduler.xml. | -| `yarn.scheduler.fair.user-as-default-queue` | Whether to use the username associated with the allocation as the default queue name, in the event that a queue name is not specified. If this is set to "false" or unset, all jobs have a shared default queue, named "default". Defaults to true. If a queue placement policy is given in the allocations file, this property is ignored. | +| `yarn.scheduler.fair.user-as-default-queue` | Whether to use the username associated with the allocation as the default queue name, in the event that a queue name is not specified. If this is set to "false" or unset, all jobs have a shared default queue, named "default". Defaults to true. If a queue placement policy is given in the allocations file, this property is ignored. **Note:** If this is set to false, the "default" queue needs to be declared in the allocations file. | | `yarn.scheduler.fair.preemption` | Whether to use preemption. Defaults to false. | | `yarn.scheduler.fair.preemption.cluster-utilization-threshold` | The utilization threshold after which preemption kicks in. The utilization is computed as the maximum ratio of usage to capacity among all resources. Defaults to 0.8f. | | `yarn.scheduler.fair.sizebasedweight` | Whether to assign shares to individual apps based on their size, rather than providing an equal share to all apps regardless of size. When set to true, apps are weighted by the natural logarithm of one plus the app's total requested memory, divided by the natural logarithm of 2. Defaults to false. | @@ -72,7 +72,7 @@ Customizing the Fair Scheduler typically involves altering two files. First, sch | `yarn.scheduler.fair.max.assign` | If assignmultiple is true and dynamic.max.assign is false, the maximum amount of containers that can be assigned in one heartbeat. Defaults to -1, which sets no limit. | | `yarn.scheduler.fair.locality.threshold.node` | For applications that request containers on particular nodes, the number of scheduling opportunities since the last container assignment to wait before accepting a placement on another node. Expressed as a float between 0 and 1, which, as a fraction of the cluster size, is the number of scheduling opportunities to pass up. The default value of -1.0 means don't pass up any scheduling opportunities. 
| | `yarn.scheduler.fair.locality.threshold.rack` | For applications that request containers on particular racks, the number of scheduling opportunities since the last container assignment to wait before accepting a placement on another rack. Expressed as a float between 0 and 1, which, as a fraction of the cluster size, is the number of scheduling opportunities to pass up. The default value of -1.0 means don't pass up any scheduling opportunities. | -| `yarn.scheduler.fair.allow-undeclared-pools` | If this is true, new queues can be created at application submission time, whether because they are specified as the application's queue by the submitter or because they are placed there by the user-as-default-queue property. If this is false, any time an app would be placed in a queue that is not specified in the allocations file, it is placed in the "default" queue instead. Defaults to true. If a queue placement policy is given in the allocations file, this property is ignored. | +| `yarn.scheduler.fair.allow-undeclared-pools` | If this is true, new queues can be created at application submission time, whether because they are specified as the application's queue by the submitter or because they are placed there by the user-as-default-queue property. If this is false, any time an app would be placed in a queue that is not specified in the allocations file, it is placed in the "default" queue instead. Defaults to true. **Note:** If this is set to false, the "default" queue also needs to be declared in the allocations file. If a queue placement policy is given in the allocations file, this property is ignored. | | `yarn.scheduler.fair.update-interval-ms` | The interval at which to lock the scheduler and recalculate fair shares, recalculate demand, and check whether anything is due for preemption. Defaults to 500 ms. | | `yarn.resource-types.memory-mb.increment-allocation` | The fairscheduler grants memory in increments of this value. If you submit a task with resource request that is not a multiple of `memory-mb.increment-allocation`, the request will be rounded up to the nearest increment. Defaults to 1024 MB. | | `yarn.resource-types.vcores.increment-allocation` | The fairscheduler grants vcores in increments of this value. If you submit a task with resource request that is not a multiple of `vcores.increment-allocation`, the request will be rounded up to the nearest increment. Defaults to 1. | From ce89881fb9c83a116df0a6eb300db6e500a9f1d0 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Mon, 24 May 2021 15:15:39 +0530 Subject: [PATCH 343/666] HDFS-15790. 
Make ProtobufRpcEngineProtos and ProtobufRpcEngineProtos2 Co-Exist (#2767) --- hadoop-common-project/hadoop-common/pom.xml | 49 +- .../apache/hadoop/ipc/ProtobufRpcEngine.java | 215 +- .../apache/hadoop/ipc/ProtobufRpcEngine2.java | 63 +- .../main/java/org/apache/hadoop/ipc/RPC.java | 9 +- .../hadoop/ipc/protobuf/TestProtosLegacy.java | 9892 +++++++++++++++++ .../protobuf/TestRpcServiceProtosLegacy.java | 3313 ++++++ .../apache/hadoop/ipc/TestProtoBufRpc.java | 152 +- .../ipc/TestProtoBufRpcServerHandoff.java | 2 +- .../src/test/proto/test_legacy.proto | 101 + .../test/proto/test_rpc_service_legacy.proto | 79 + 10 files changed, 13699 insertions(+), 176 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestProtosLegacy.java create mode 100644 hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestRpcServiceProtosLegacy.java create mode 100644 hadoop-common-project/hadoop-common/src/test/proto/test_legacy.proto create mode 100644 hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service_legacy.proto diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index e7ad1344a8f27..436c15e4fc3cc 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -428,7 +428,12 @@ src-test-compile-protoc - false + + false + + *legacy.proto + + @@ -449,6 +454,10 @@ replace-generated-test-sources false + + **/TestProtosLegacy.java + **/TestRpcServiceProtosLegacy.java + @@ -461,6 +470,7 @@ **/RpcWritable.java **/ProtobufRpcEngineCallback.java **/ProtobufRpcEngine.java + **/ProtobufRpcEngine2.java **/ProtobufRpcEngineProtos.java @@ -469,6 +479,9 @@ replace-test-sources false + + **/TestProtoBufRpc.java + @@ -1092,6 +1105,18 @@ + + add-test-source-legacy-protobuf + generate-test-sources + + add-test-source + + + + ${basedir}/src/test/arm-java + + + @@ -1133,6 +1158,28 @@ + + src-test-compile-protoc-legacy + generate-test-sources + + compile + + + false + + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + false + ${basedir}/src/test/proto + ${project.build.directory}/generated-test-sources/java + false + + test_legacy.proto + test_rpc_service_legacy.proto + + + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 882cc141d8913..3277f378b1e6a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ipc; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; @@ -31,16 +30,15 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; -import org.apache.hadoop.ipc.RPC.RpcInvoker; -import org.apache.hadoop.ipc.RPC.RpcKind; import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; -import 
org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.tracing.TraceScope; import org.apache.hadoop.tracing.Tracer; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.AsyncGet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,9 +66,8 @@ public class ProtobufRpcEngine implements RpcEngine { ASYNC_RETURN_MESSAGE = new ThreadLocal<>(); static { // Register the rpcRequest deserializer for ProtobufRpcEngine - org.apache.hadoop.ipc.Server.registerProtocolEngine( - RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class, - new Server.ProtoBufRpcInvoker()); + //These will be used in server side, which is always ProtobufRpcEngine2 + ProtobufRpcEngine2.registerProtocolEngine(); } private static final ClientCache CLIENTS = new ClientCache(); @@ -352,8 +349,6 @@ static Client getClient(Configuration conf) { return CLIENTS.getClient(conf, SocketFactory.getDefault(), RpcWritable.Buffer.class); } - - @Override public RPC.Server getServer(Class protocol, Object protocolImpl, @@ -366,25 +361,17 @@ public RPC.Server getServer(Class protocol, Object protocolImpl, numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, portRangeConfig, alignmentContext); } - - public static class Server extends RPC.Server { + + /** + * Server implementation is always ProtobufRpcEngine2 based implementation, + * supports backward compatibility for protobuf 2.5 based implementations, + * which uses non-shaded protobuf classes. + */ + public static class Server extends ProtobufRpcEngine2.Server { static final ThreadLocal currentCallback = new ThreadLocal<>(); - static final ThreadLocal currentCallInfo = new ThreadLocal<>(); - private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker(); - - static class CallInfo { - private final RPC.Server server; - private final String methodName; - - public CallInfo(RPC.Server server, String methodName) { - this.server = server; - this.methodName = methodName; - } - } - static class ProtobufRpcEngineCallbackImpl implements ProtobufRpcEngineCallback { @@ -394,9 +381,9 @@ static class ProtobufRpcEngineCallbackImpl private final long setupTime; public ProtobufRpcEngineCallbackImpl() { - this.server = currentCallInfo.get().server; + this.server = CURRENT_CALL_INFO.get().getServer(); this.call = Server.getCurCall().get(); - this.methodName = currentCallInfo.get().methodName; + this.methodName = CURRENT_CALL_INFO.get().getMethodName(); this.setupTime = Time.now(); } @@ -443,144 +430,58 @@ public Server(Class protocolClass, Object protocolImpl, SecretManager secretManager, String portRangeConfig, AlignmentContext alignmentContext) throws IOException { - super(bindAddress, port, null, numHandlers, - numReaders, queueSizePerHandler, conf, - serverNameFromClass(protocolImpl.getClass()), secretManager, - portRangeConfig); - setAlignmentContext(alignmentContext); - this.verbose = verbose; - registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, - protocolImpl); - } - - @Override - protected RpcInvoker getServerRpcInvoker(RpcKind rpcKind) { - if (rpcKind == RpcKind.RPC_PROTOCOL_BUFFER) { - return RPC_INVOKER; - } - return super.getServerRpcInvoker(rpcKind); + super(protocolClass, protocolImpl, conf, bindAddress, port, numHandlers, + numReaders, queueSizePerHandler, verbose, secretManager, + portRangeConfig, alignmentContext); } /** - * Protobuf invoker for {@link RpcInvoker} + * This 
implementation is same as + * ProtobufRpcEngine2.Server.ProtobufInvoker#call(..) + * except this implementation uses non-shaded protobuf classes from legacy + * protobuf version (default 2.5.0). */ - static class ProtoBufRpcInvoker implements RpcInvoker { - private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server, - String protoName, long clientVersion) throws RpcServerException { - ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion); - ProtoClassProtoImpl impl = - server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv); - if (impl == null) { // no match for Protocol AND Version - VerProtocolImpl highest = - server.getHighestSupportedProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, - protoName); - if (highest == null) { - throw new RpcNoSuchProtocolException( - "Unknown protocol: " + protoName); - } - // protocol supported but not the version that client wants - throw new RPC.VersionMismatch(protoName, clientVersion, - highest.version); - } - return impl; + static RpcWritable processCall(RPC.Server server, + String connectionProtocolName, RpcWritable.Buffer request, + String methodName, ProtoClassProtoImpl protocolImpl) throws Exception { + BlockingService service = (BlockingService) protocolImpl.protocolImpl; + MethodDescriptor methodDescriptor = service.getDescriptorForType() + .findMethodByName(methodName); + if (methodDescriptor == null) { + String msg = "Unknown method " + methodName + " called on " + + connectionProtocolName + " protocol."; + LOG.warn(msg); + throw new RpcNoSuchMethodException(msg); } + Message prototype = service.getRequestPrototype(methodDescriptor); + Message param = request.getValue(prototype); - @Override - /** - * This is a server side method, which is invoked over RPC. On success - * the return response has protobuf response payload. On failure, the - * exception name and the stack trace are returned in the response. - * See {@link HadoopRpcResponseProto} - * - * In this method there three types of exceptions possible and they are - * returned in response as follows. - *
 <ol>
- * <li> Exceptions encountered in this method that are returned
- * as {@link RpcServerException} </li>
- * <li> Exceptions thrown by the service is wrapped in ServiceException.
- * In that this method returns in response the exception thrown by the
- * service.</li>
- * <li> Other exceptions thrown by the service. They are returned as
- * it is.</li>
- * </ol>
    - */ - public Writable call(RPC.Server server, String connectionProtocolName, - Writable writableRequest, long receiveTime) throws Exception { - RpcProtobufRequest request = (RpcProtobufRequest) writableRequest; - RequestHeaderProto rpcRequest = request.getRequestHeader(); - String methodName = rpcRequest.getMethodName(); - - /** - * RPCs for a particular interface (ie protocol) are done using a - * IPC connection that is setup using rpcProxy. - * The rpcProxy's has a declared protocol name that is - * sent form client to server at connection time. - * - * Each Rpc call also sends a protocol name - * (called declaringClassprotocolName). This name is usually the same - * as the connection protocol name except in some cases. - * For example metaProtocols such ProtocolInfoProto which get info - * about the protocol reuse the connection but need to indicate that - * the actual protocol is different (i.e. the protocol is - * ProtocolInfoProto) since they reuse the connection; in this case - * the declaringClassProtocolName field is set to the ProtocolInfoProto. - */ - - String declaringClassProtoName = - rpcRequest.getDeclaringClassProtocolName(); - long clientVersion = rpcRequest.getClientProtocolVersion(); - return call(server, connectionProtocolName, request, receiveTime, - methodName, declaringClassProtoName, clientVersion); - } - - protected Writable call(RPC.Server server, String connectionProtocolName, - RpcWritable.Buffer request, long receiveTime, String methodName, - String declaringClassProtoName, long clientVersion) throws Exception { - if (server.verbose) - LOG.info("Call: connectionProtocolName=" + connectionProtocolName + - ", method=" + methodName); - - ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, - declaringClassProtoName, clientVersion); - BlockingService service = (BlockingService) protocolImpl.protocolImpl; - MethodDescriptor methodDescriptor = service.getDescriptorForType() - .findMethodByName(methodName); - if (methodDescriptor == null) { - String msg = "Unknown method " + methodName + " called on " - + connectionProtocolName + " protocol."; - LOG.warn(msg); - throw new RpcNoSuchMethodException(msg); - } - Message prototype = service.getRequestPrototype(methodDescriptor); - Message param = request.getValue(prototype); - - Message result; - Call currentCall = Server.getCurCall().get(); - try { - server.rpcDetailedMetrics.init(protocolImpl.protocolClass); - currentCallInfo.set(new CallInfo(server, methodName)); - currentCall.setDetailedMetricsName(methodName); - result = service.callBlockingMethod(methodDescriptor, null, param); - // Check if this needs to be a deferred response, - // by checking the ThreadLocal callback being set - if (currentCallback.get() != null) { - currentCall.deferResponse(); - currentCallback.set(null); - return null; - } - } catch (ServiceException e) { - Exception exception = (Exception) e.getCause(); - currentCall.setDetailedMetricsName( - exception.getClass().getSimpleName()); - throw (Exception) e.getCause(); - } catch (Exception e) { - currentCall.setDetailedMetricsName(e.getClass().getSimpleName()); - throw e; - } finally { - currentCallInfo.set(null); + Message result; + Call currentCall = Server.getCurCall().get(); + try { + server.rpcDetailedMetrics.init(protocolImpl.protocolClass); + CURRENT_CALL_INFO.set(new CallInfo(server, methodName)); + currentCall.setDetailedMetricsName(methodName); + result = service.callBlockingMethod(methodDescriptor, null, param); + // Check if this needs to be a deferred response, + // by 
checking the ThreadLocal callback being set + if (currentCallback.get() != null) { + currentCall.deferResponse(); + currentCallback.set(null); + return null; } - return RpcWritable.wrap(result); + } catch (ServiceException e) { + Exception exception = (Exception) e.getCause(); + currentCall + .setDetailedMetricsName(exception.getClass().getSimpleName()); + throw (Exception) e.getCause(); + } catch (Exception e) { + currentCall.setDetailedMetricsName(e.getClass().getSimpleName()); + throw e; + } finally { + CURRENT_CALL_INFO.set(null); } + return RpcWritable.wrap(result); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java index 2f5d56437d034..a68f18f52c7ae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java @@ -18,9 +18,6 @@ package org.apache.hadoop.ipc; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.protobuf.*; -import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -33,6 +30,12 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.thirdparty.protobuf.Message; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; +import org.apache.hadoop.thirdparty.protobuf.TextFormat; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGet; import org.apache.hadoop.tracing.Tracer; @@ -61,9 +64,16 @@ public class ProtobufRpcEngine2 implements RpcEngine { ASYNC_RETURN_MESSAGE = new ThreadLocal<>(); static { // Register the rpcRequest deserializer for ProtobufRpcEngine - org.apache.hadoop.ipc.Server.registerProtocolEngine( - RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class, - new Server.ProtoBufRpcInvoker()); + registerProtocolEngine(); + } + + static void registerProtocolEngine() { + if (Server.getRpcInvoker(RPC.RpcKind.RPC_PROTOCOL_BUFFER) == null) { + org.apache.hadoop.ipc.Server + .registerProtocolEngine(RPC.RpcKind.RPC_PROTOCOL_BUFFER, + ProtobufRpcEngine2.RpcProtobufRequest.class, + new Server.ProtoBufRpcInvoker()); + } } private static final ClientCache CLIENTS = new ClientCache(); @@ -383,6 +393,14 @@ static class CallInfo { this.server = server; this.methodName = methodName; } + + public RPC.Server getServer() { + return server; + } + + public String getMethodName() { + return methodName; + } } static class ProtobufRpcEngineCallbackImpl @@ -394,9 +412,9 @@ static class ProtobufRpcEngineCallbackImpl private final long setupTime; ProtobufRpcEngineCallbackImpl() { - this.server = CURRENT_CALL_INFO.get().server; + this.server = CURRENT_CALL_INFO.get().getServer(); this.call = Server.getCurCall().get(); - this.methodName = CURRENT_CALL_INFO.get().methodName; + this.methodName = 
CURRENT_CALL_INFO.get().getMethodName(); this.setupTime = Time.now(); } @@ -417,7 +435,7 @@ public void error(Throwable t) { } @InterfaceStability.Unstable - public static ProtobufRpcEngineCallback2 registerForDeferredResponse() { + public static ProtobufRpcEngineCallback2 registerForDeferredResponse2() { ProtobufRpcEngineCallback2 callback = new ProtobufRpcEngineCallbackImpl(); CURRENT_CALLBACK.set(callback); return callback; @@ -453,6 +471,17 @@ public Server(Class protocolClass, Object protocolImpl, protocolImpl); } + //Use the latest protobuf rpc invoker itself as that is backward compatible. + private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker(); + + @Override + protected RpcInvoker getServerRpcInvoker(RPC.RpcKind rpcKind) { + if (rpcKind == RPC.RpcKind.RPC_PROTOCOL_BUFFER) { + return RPC_INVOKER; + } + return super.getServerRpcInvoker(rpcKind); + } + /** * Protobuf invoker for {@link RpcInvoker}. */ @@ -524,6 +553,7 @@ public Writable call(RPC.Server server, String connectionProtocolName, methodName, declaringClassProtoName, clientVersion); } + @SuppressWarnings("deprecation") protected Writable call(RPC.Server server, String connectionProtocolName, RpcWritable.Buffer request, long receiveTime, String methodName, String declaringClassProtoName, long clientVersion) throws Exception { @@ -534,6 +564,21 @@ protected Writable call(RPC.Server server, String connectionProtocolName, ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, declaringClassProtoName, clientVersion); + if (protocolImpl.isShadedPBImpl()) { + return call(server, connectionProtocolName, request, methodName, + protocolImpl); + } + //Legacy protobuf implementation. Handle using legacy (Non-shaded) + // protobuf classes. + return ProtobufRpcEngine.Server + .processCall(server, connectionProtocolName, request, methodName, + protocolImpl); + } + + private RpcWritable call(RPC.Server server, + String connectionProtocolName, RpcWritable.Buffer request, + String methodName, ProtoClassProtoImpl protocolImpl) + throws Exception { BlockingService service = (BlockingService) protocolImpl.protocolImpl; MethodDescriptor methodDescriptor = service.getDescriptorForType() .findMethodByName(methodName); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 6169fef7f6d16..3bbd82d153a09 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -937,11 +937,18 @@ public int hashCode() { */ static class ProtoClassProtoImpl { final Class protocolClass; - final Object protocolImpl; + final Object protocolImpl; + private final boolean shadedPBImpl; + ProtoClassProtoImpl(Class protocolClass, Object protocolImpl) { this.protocolClass = protocolClass; this.protocolImpl = protocolImpl; + this.shadedPBImpl = protocolImpl instanceof BlockingService; } + + public boolean isShadedPBImpl() { + return shadedPBImpl; + } } ArrayList> protocolImplMapArray = diff --git a/hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestProtosLegacy.java b/hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestProtosLegacy.java new file mode 100644 index 0000000000000..0a7c1f917bae4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestProtosLegacy.java @@ -0,0 +1,9892 @@ 
+/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// This is class is added to source because for arm protoc 2.5.0 executable +// is not available to generate the same code. +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: test_legacy.proto + +package org.apache.hadoop.ipc.protobuf; + +public final class TestProtosLegacy { + private TestProtosLegacy() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface EmptyRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hadoop.common.EmptyRequestProto} + */ + public static final class EmptyRequestProto extends + com.google.protobuf.GeneratedMessage + implements EmptyRequestProtoOrBuilder { + // Use EmptyRequestProto.newBuilder() to construct. + private EmptyRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EmptyRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EmptyRequestProto defaultInstance; + public static EmptyRequestProto getDefaultInstance() { + return defaultInstance; + } + + public EmptyRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EmptyRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EmptyRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EmptyRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.EmptyRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.EmptyRequestProto) + } + + static { + defaultInstance = new EmptyRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.EmptyRequestProto) + } + + public interface EmptyResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hadoop.common.EmptyResponseProto} + */ + public static final class EmptyResponseProto extends + com.google.protobuf.GeneratedMessage + implements EmptyResponseProtoOrBuilder { + // Use EmptyResponseProto.newBuilder() to construct. 
+ private EmptyResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EmptyResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EmptyResponseProto defaultInstance; + public static EmptyResponseProto getDefaultInstance() { + return defaultInstance; + } + + public EmptyResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EmptyResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EmptyResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EmptyResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + 
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } 
+ public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.EmptyResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EmptyResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()) return this; + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.EmptyResponseProto) + } + + static { + defaultInstance = new EmptyResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.EmptyResponseProto) + } + + public interface EchoRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string message = 1; + /** + * required string message = 1; + */ + boolean hasMessage(); + /** + * required string message = 1; + */ + java.lang.String getMessage(); + /** + * required string message = 1; + */ + com.google.protobuf.ByteString + getMessageBytes(); + } + /** + * Protobuf type {@code hadoop.common.EchoRequestProto} + */ + public static final class EchoRequestProto extends + com.google.protobuf.GeneratedMessage + implements EchoRequestProtoOrBuilder { + // Use EchoRequestProto.newBuilder() to construct. + private EchoRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EchoRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EchoRequestProto defaultInstance; + public static EchoRequestProto getDefaultInstance() { + return defaultInstance; + } + + public EchoRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EchoRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + message_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EchoRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EchoRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string message = 1; + public static final int MESSAGE_FIELD_NUMBER = 1; + private java.lang.Object message_; + /** + * required string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + message_ = s; + } + return s; + } + } + /** + * required string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + message_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMessage()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)) { + return super.equals(obj); + } + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto) obj; + + boolean result = true; + result = result && (hasMessage() == other.hasMessage()); + if (hasMessage()) { + result = result && getMessage() + .equals(other.getMessage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMessage()) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.EchoRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + message_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.message_ = message_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance()) return this; + if (other.hasMessage()) { + bitField0_ |= 0x00000001; + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMessage()) { + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string message = 1; + private java.lang.Object message_ = ""; + /** + * required string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string message = 1; + */ + public Builder setMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + /** + * required string message = 1; + */ + public Builder clearMessage() { + bitField0_ = (bitField0_ & ~0x00000001); + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * required string message = 1; + */ + public Builder setMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.EchoRequestProto) + } + + static { + defaultInstance = new EchoRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.EchoRequestProto) + } + + public interface EchoResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string message = 1; + /** + * required string message = 1; + */ + boolean hasMessage(); + /** + * required string message = 1; + */ + java.lang.String getMessage(); + /** + * required string message = 1; + */ + com.google.protobuf.ByteString + getMessageBytes(); + } + /** + * Protobuf type {@code hadoop.common.EchoResponseProto} + */ + public static final class EchoResponseProto extends + 
com.google.protobuf.GeneratedMessage + implements EchoResponseProtoOrBuilder { + // Use EchoResponseProto.newBuilder() to construct. + private EchoResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EchoResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EchoResponseProto defaultInstance; + public static EchoResponseProto getDefaultInstance() { + return defaultInstance; + } + + public EchoResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EchoResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + message_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EchoResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EchoResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string message = 1; + public static final int MESSAGE_FIELD_NUMBER = 1; + private java.lang.Object message_; + /** + * required string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + message_ = s; + } + return s; + } + } + /** + * required string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + message_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMessage()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto) obj; + + boolean result = true; + result = result && (hasMessage() == other.hasMessage()); + if (hasMessage()) { + result = result && getMessage() + .equals(other.getMessage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMessage()) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.EchoResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + message_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.message_ = message_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance()) return this; + if (other.hasMessage()) { + bitField0_ |= 0x00000001; + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMessage()) { + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string message = 1; + private java.lang.Object message_ = ""; + /** + * required string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); 
+ message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string message = 1; + */ + public Builder setMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + /** + * required string message = 1; + */ + public Builder clearMessage() { + bitField0_ = (bitField0_ & ~0x00000001); + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * required string message = 1; + */ + public Builder setMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.EchoResponseProto) + } + + static { + defaultInstance = new EchoResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.EchoResponseProto) + } + + public interface OptRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string message = 1; + /** + * optional string message = 1; + */ + boolean hasMessage(); + /** + * optional string message = 1; + */ + java.lang.String getMessage(); + /** + * optional string message = 1; + */ + com.google.protobuf.ByteString + getMessageBytes(); + } + /** + * Protobuf type {@code hadoop.common.OptRequestProto} + */ + public static final class OptRequestProto extends + com.google.protobuf.GeneratedMessage + implements OptRequestProtoOrBuilder { + // Use OptRequestProto.newBuilder() to construct. 
+ private OptRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private OptRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final OptRequestProto defaultInstance; + public static OptRequestProto getDefaultInstance() { + return defaultInstance; + } + + public OptRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OptRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + message_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public OptRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OptRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string message = 1; + public static final int MESSAGE_FIELD_NUMBER = 1; + private java.lang.Object message_; + /** + * optional string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + message_ = s; + } + return s; + } + } + /** + * 
optional string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + message_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto) obj; + + boolean result = true; + result = result && (hasMessage() == other.hasMessage()); + if (hasMessage()) { + result = result && getMessage() + .equals(other.getMessage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMessage()) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.OptRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + message_ = ""; + bitField0_ = (bitField0_ 
& ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.message_ = message_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.getDefaultInstance()) return this; + if (other.hasMessage()) { + bitField0_ |= 0x00000001; + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string message = 1; + private java.lang.Object message_ = ""; + /** + * optional string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + 
} else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string message = 1; + */ + public Builder setMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + /** + * optional string message = 1; + */ + public Builder clearMessage() { + bitField0_ = (bitField0_ & ~0x00000001); + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * optional string message = 1; + */ + public Builder setMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.OptRequestProto) + } + + static { + defaultInstance = new OptRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.OptRequestProto) + } + + public interface OptResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string message = 1; + /** + * optional string message = 1; + */ + boolean hasMessage(); + /** + * optional string message = 1; + */ + java.lang.String getMessage(); + /** + * optional string message = 1; + */ + com.google.protobuf.ByteString + getMessageBytes(); + } + /** + * Protobuf type {@code hadoop.common.OptResponseProto} + */ + public static final class OptResponseProto extends + com.google.protobuf.GeneratedMessage + implements OptResponseProtoOrBuilder { + // Use OptResponseProto.newBuilder() to construct. + private OptResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private OptResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final OptResponseProto defaultInstance; + public static OptResponseProto getDefaultInstance() { + return defaultInstance; + } + + public OptResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private OptResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + message_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public OptResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new OptResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string message = 1; + public static final int MESSAGE_FIELD_NUMBER = 1; + private java.lang.Object message_; + /** + * optional string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + message_ = s; + } + return s; + } + } + /** + * optional string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + message_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMessageBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMessageBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto other = 
(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto) obj; + + boolean result = true; + result = result && (hasMessage() == other.hasMessage()); + if (hasMessage()) { + result = result && getMessage() + .equals(other.getMessage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMessage()) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto prototype) { + return 
newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.OptResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + message_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_OptResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.message_ = message_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto other) { + if (other == 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance()) return this; + if (other.hasMessage()) { + bitField0_ |= 0x00000001; + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string message = 1; + private java.lang.Object message_ = ""; + /** + * optional string message = 1; + */ + public boolean hasMessage() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string message = 1; + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string message = 1; + */ + public Builder setMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + /** + * optional string message = 1; + */ + public Builder clearMessage() { + bitField0_ = (bitField0_ & ~0x00000001); + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * optional string message = 1; + */ + public Builder setMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + message_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.OptResponseProto) + } + + static { + defaultInstance = new OptResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.OptResponseProto) + } + + public interface SleepRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int32 milliSeconds = 1; + /** + * required int32 milliSeconds = 1; + */ + boolean hasMilliSeconds(); + /** + * required int32 milliSeconds = 1; + */ + int getMilliSeconds(); + } + /** + * Protobuf type {@code hadoop.common.SleepRequestProto} + */ + public static final class SleepRequestProto extends + com.google.protobuf.GeneratedMessage + implements SleepRequestProtoOrBuilder { + // Use SleepRequestProto.newBuilder() to construct. 
+ private SleepRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SleepRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SleepRequestProto defaultInstance; + public static SleepRequestProto getDefaultInstance() { + return defaultInstance; + } + + public SleepRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SleepRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + milliSeconds_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SleepRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SleepRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 milliSeconds = 1; + public static final int MILLISECONDS_FIELD_NUMBER = 1; + private int milliSeconds_; + /** + * required int32 milliSeconds = 1; + */ + public boolean hasMilliSeconds() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 milliSeconds = 1; + */ + public int getMilliSeconds() { + return milliSeconds_; + } + + private void initFields() { + milliSeconds_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if 
(!hasMilliSeconds()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, milliSeconds_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, milliSeconds_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto) obj; + + boolean result = true; + result = result && (hasMilliSeconds() == other.hasMilliSeconds()); + if (hasMilliSeconds()) { + result = result && (getMilliSeconds() + == other.getMilliSeconds()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMilliSeconds()) { + hash = (37 * hash) + MILLISECONDS_FIELD_NUMBER; + hash = (53 * hash) + getMilliSeconds(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.SleepRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + milliSeconds_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto getDefaultInstanceForType() { + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.milliSeconds_ = milliSeconds_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance()) return this; + if (other.hasMilliSeconds()) { + setMilliSeconds(other.getMilliSeconds()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMilliSeconds()) { + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 milliSeconds = 1; + private int milliSeconds_ ; + /** + * required int32 milliSeconds = 1; + */ + public boolean hasMilliSeconds() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 milliSeconds = 1; + */ + public int getMilliSeconds() { + return milliSeconds_; + } + /** + * required int32 milliSeconds = 1; + */ + public Builder setMilliSeconds(int value) { + bitField0_ |= 0x00000001; + milliSeconds_ = value; + onChanged(); + return this; + } + /** + * required int32 milliSeconds = 1; + */ + public Builder clearMilliSeconds() { + bitField0_ = (bitField0_ & ~0x00000001); + milliSeconds_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.SleepRequestProto) + } + + static { + defaultInstance = new SleepRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.SleepRequestProto) + } + + public interface SleepResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hadoop.common.SleepResponseProto} + */ + public static final class SleepResponseProto extends + com.google.protobuf.GeneratedMessage + 
implements SleepResponseProtoOrBuilder { + // Use SleepResponseProto.newBuilder() to construct. + private SleepResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SleepResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SleepResponseProto defaultInstance; + public static SleepResponseProto getDefaultInstance() { + return defaultInstance; + } + + public SleepResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SleepResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SleepResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SleepResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = 
size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.SleepResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto other) { + if (other == 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.SleepResponseProto) + } + + static { + defaultInstance = new SleepResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.SleepResponseProto) + } + + public interface SlowPingRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool shouldSlow = 1; + /** + * required bool shouldSlow = 1; + */ + boolean hasShouldSlow(); + /** + * required bool shouldSlow = 1; + */ + boolean getShouldSlow(); + } + /** + * Protobuf type {@code hadoop.common.SlowPingRequestProto} + */ + public static final class SlowPingRequestProto extends + com.google.protobuf.GeneratedMessage + implements SlowPingRequestProtoOrBuilder { + // Use SlowPingRequestProto.newBuilder() to construct. + private SlowPingRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SlowPingRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SlowPingRequestProto defaultInstance; + public static SlowPingRequestProto getDefaultInstance() { + return defaultInstance; + } + + public SlowPingRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SlowPingRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + shouldSlow_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SlowPingRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SlowPingRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SlowPingRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SlowPingRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool shouldSlow = 1; + public static final int SHOULDSLOW_FIELD_NUMBER = 1; + private boolean shouldSlow_; + /** + * required bool shouldSlow = 1; + */ + public boolean hasShouldSlow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool shouldSlow = 1; + */ + public boolean getShouldSlow() { + return shouldSlow_; + } + + private void initFields() { + shouldSlow_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasShouldSlow()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, shouldSlow_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, shouldSlow_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto) obj; + + boolean result = true; + result = result && (hasShouldSlow() == other.hasShouldSlow()); + if (hasShouldSlow()) { + result = result && (getShouldSlow() + == other.getShouldSlow()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) 
+ getDescriptorForType().hashCode(); + if (hasShouldSlow()) { + hash = (37 * hash) + SHOULDSLOW_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getShouldSlow()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.SlowPingRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SlowPingRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SlowPingRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + shouldSlow_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SlowPingRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.shouldSlow_ = shouldSlow_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.getDefaultInstance()) return this; + if (other.hasShouldSlow()) { + setShouldSlow(other.getShouldSlow()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasShouldSlow()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool shouldSlow = 1; + private boolean shouldSlow_ ; + /** + * required bool shouldSlow = 1; + */ + public boolean hasShouldSlow() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool shouldSlow = 1; + */ + public boolean getShouldSlow() { + return shouldSlow_; + } + /** + * required bool shouldSlow = 1; + */ + public Builder setShouldSlow(boolean value) { + bitField0_ |= 0x00000001; + shouldSlow_ = value; + onChanged(); + return this; + } + /** + * required bool shouldSlow = 1; + */ + public Builder clearShouldSlow() { + bitField0_ = (bitField0_ & ~0x00000001); + shouldSlow_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.SlowPingRequestProto) + } + + static { + defaultInstance = new SlowPingRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.SlowPingRequestProto) + } + + public interface EchoRequestProto2OrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string message = 1; + /** + * repeated string message = 1; + */ + java.util.List + getMessageList(); + /** + * repeated string message = 1; + */ + int getMessageCount(); + /** + * repeated string message = 1; + */ + java.lang.String getMessage(int index); + /** + * repeated string message = 1; + */ + com.google.protobuf.ByteString + getMessageBytes(int index); + } + /** + * Protobuf type {@code hadoop.common.EchoRequestProto2} + */ + public static final class EchoRequestProto2 extends + com.google.protobuf.GeneratedMessage + implements EchoRequestProto2OrBuilder { + // Use EchoRequestProto2.newBuilder() to construct. 
+ private EchoRequestProto2(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EchoRequestProto2(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EchoRequestProto2 defaultInstance; + public static EchoRequestProto2 getDefaultInstance() { + return defaultInstance; + } + + public EchoRequestProto2 getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EchoRequestProto2( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + message_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.UnmodifiableLazyStringList(message_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EchoRequestProto2 parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EchoRequestProto2(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string message = 1; + public static final int MESSAGE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList message_; + /** + * repeated string message = 1; + */ + public java.util.List + getMessageList() { + return message_; + } + /** + * repeated string message = 1; + */ + public int getMessageCount() { + return message_.size(); + } + /** + * repeated string 
message = 1; + */ + public java.lang.String getMessage(int index) { + return message_.get(index); + } + /** + * repeated string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes(int index) { + return message_.getByteString(index); + } + + private void initFields() { + message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < message_.size(); i++) { + output.writeBytes(1, message_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < message_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(message_.getByteString(i)); + } + size += dataSize; + size += 1 * getMessageList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2) obj; + + boolean result = true; + result = result && getMessageList() + .equals(other.getMessageList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getMessageCount() > 0) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessageList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.EchoRequestProto2} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2OrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + message_ = 
com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoRequestProto2_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.UnmodifiableLazyStringList( + message_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.message_ = message_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.getDefaultInstance()) return this; + if (!other.message_.isEmpty()) { + if (message_.isEmpty()) { + message_ = other.message_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMessageIsMutable(); + message_.addAll(other.message_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string message = 1; + private com.google.protobuf.LazyStringList message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureMessageIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.LazyStringArrayList(message_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string message = 1; + */ + public java.util.List + getMessageList() { + return java.util.Collections.unmodifiableList(message_); + } + /** + * repeated string message = 1; + */ + public int getMessageCount() { + return message_.size(); + 
} + /** + * repeated string message = 1; + */ + public java.lang.String getMessage(int index) { + return message_.get(index); + } + /** + * repeated string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes(int index) { + return message_.getByteString(index); + } + /** + * repeated string message = 1; + */ + public Builder setMessage( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMessageIsMutable(); + message_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder addMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMessageIsMutable(); + message_.add(value); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder addAllMessage( + java.lang.Iterable values) { + ensureMessageIsMutable(); + super.addAll(values, message_); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder clearMessage() { + message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder addMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMessageIsMutable(); + message_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.EchoRequestProto2) + } + + static { + defaultInstance = new EchoRequestProto2(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.EchoRequestProto2) + } + + public interface EchoResponseProto2OrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string message = 1; + /** + * repeated string message = 1; + */ + java.util.List + getMessageList(); + /** + * repeated string message = 1; + */ + int getMessageCount(); + /** + * repeated string message = 1; + */ + java.lang.String getMessage(int index); + /** + * repeated string message = 1; + */ + com.google.protobuf.ByteString + getMessageBytes(int index); + } + /** + * Protobuf type {@code hadoop.common.EchoResponseProto2} + */ + public static final class EchoResponseProto2 extends + com.google.protobuf.GeneratedMessage + implements EchoResponseProto2OrBuilder { + // Use EchoResponseProto2.newBuilder() to construct. 
+ private EchoResponseProto2(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EchoResponseProto2(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EchoResponseProto2 defaultInstance; + public static EchoResponseProto2 getDefaultInstance() { + return defaultInstance; + } + + public EchoResponseProto2 getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EchoResponseProto2( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + message_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.UnmodifiableLazyStringList(message_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EchoResponseProto2 parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EchoResponseProto2(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string message = 1; + public static final int MESSAGE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList message_; + /** + * repeated string message = 1; + */ + public java.util.List + getMessageList() { + return message_; + } + /** + * repeated string message = 1; + */ + public int getMessageCount() { + return message_.size(); + } + /** + * repeated 
string message = 1; + */ + public java.lang.String getMessage(int index) { + return message_.get(index); + } + /** + * repeated string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes(int index) { + return message_.getByteString(index); + } + + private void initFields() { + message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < message_.size(); i++) { + output.writeBytes(1, message_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < message_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(message_.getByteString(i)); + } + size += dataSize; + size += 1 * getMessageList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2) obj; + + boolean result = true; + result = result && getMessageList() + .equals(other.getMessageList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getMessageCount() > 0) { + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessageList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.EchoResponseProto2} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2OrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + 
message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_EchoResponseProto2_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.UnmodifiableLazyStringList( + message_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.message_ = message_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance()) return this; + if (!other.message_.isEmpty()) { + if (message_.isEmpty()) { + message_ = other.message_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMessageIsMutable(); + message_.addAll(other.message_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string message = 1; + private com.google.protobuf.LazyStringList message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureMessageIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + message_ = new com.google.protobuf.LazyStringArrayList(message_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string message = 1; + */ + public java.util.List + getMessageList() { + return java.util.Collections.unmodifiableList(message_); + } + /** + * repeated string message = 1; + */ + public int getMessageCount() { + 
return message_.size(); + } + /** + * repeated string message = 1; + */ + public java.lang.String getMessage(int index) { + return message_.get(index); + } + /** + * repeated string message = 1; + */ + public com.google.protobuf.ByteString + getMessageBytes(int index) { + return message_.getByteString(index); + } + /** + * repeated string message = 1; + */ + public Builder setMessage( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMessageIsMutable(); + message_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder addMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMessageIsMutable(); + message_.add(value); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder addAllMessage( + java.lang.Iterable values) { + ensureMessageIsMutable(); + super.addAll(values, message_); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder clearMessage() { + message_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string message = 1; + */ + public Builder addMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureMessageIsMutable(); + message_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.EchoResponseProto2) + } + + static { + defaultInstance = new EchoResponseProto2(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.EchoResponseProto2) + } + + public interface AddRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int32 param1 = 1; + /** + * required int32 param1 = 1; + */ + boolean hasParam1(); + /** + * required int32 param1 = 1; + */ + int getParam1(); + + // required int32 param2 = 2; + /** + * required int32 param2 = 2; + */ + boolean hasParam2(); + /** + * required int32 param2 = 2; + */ + int getParam2(); + } + /** + * Protobuf type {@code hadoop.common.AddRequestProto} + */ + public static final class AddRequestProto extends + com.google.protobuf.GeneratedMessage + implements AddRequestProtoOrBuilder { + // Use AddRequestProto.newBuilder() to construct. 
+ private AddRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddRequestProto defaultInstance; + public static AddRequestProto getDefaultInstance() { + return defaultInstance; + } + + public AddRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + param1_ = input.readInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + param2_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 param1 = 1; + public static final int PARAM1_FIELD_NUMBER = 1; + private int param1_; + /** + * required int32 param1 = 1; + */ + public boolean hasParam1() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 param1 = 1; + */ + public int getParam1() { + return param1_; + } + + // required int32 param2 = 2; + public static final int PARAM2_FIELD_NUMBER = 2; + private int param2_; + /** + * required int32 param2 = 2; + */ + public boolean hasParam2() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * 
required int32 param2 = 2; + */ + public int getParam2() { + return param2_; + } + + private void initFields() { + param1_ = 0; + param2_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasParam1()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasParam2()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, param1_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt32(2, param2_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, param1_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, param2_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto) obj; + + boolean result = true; + result = result && (hasParam1() == other.hasParam1()); + if (hasParam1()) { + result = result && (getParam1() + == other.getParam1()); + } + result = result && (hasParam2() == other.hasParam2()); + if (hasParam2()) { + result = result && (getParam2() + == other.getParam2()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasParam1()) { + hash = (37 * hash) + PARAM1_FIELD_NUMBER; + hash = (53 * hash) + getParam1(); + } + if (hasParam2()) { + hash = (37 * hash) + PARAM2_FIELD_NUMBER; + hash = (53 * hash) + getParam2(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.AddRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + param1_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + param2_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.param1_ = param1_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.param2_ = param2_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.getDefaultInstance()) return this; + if (other.hasParam1()) { + setParam1(other.getParam1()); + } + if (other.hasParam2()) { + setParam2(other.getParam2()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasParam1()) { + + return false; + } + if (!hasParam2()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 param1 = 1; + private int param1_ ; + /** + * required int32 param1 = 1; + */ + public boolean hasParam1() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 param1 = 1; + */ + public int 
getParam1() { + return param1_; + } + /** + * required int32 param1 = 1; + */ + public Builder setParam1(int value) { + bitField0_ |= 0x00000001; + param1_ = value; + onChanged(); + return this; + } + /** + * required int32 param1 = 1; + */ + public Builder clearParam1() { + bitField0_ = (bitField0_ & ~0x00000001); + param1_ = 0; + onChanged(); + return this; + } + + // required int32 param2 = 2; + private int param2_ ; + /** + * required int32 param2 = 2; + */ + public boolean hasParam2() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 param2 = 2; + */ + public int getParam2() { + return param2_; + } + /** + * required int32 param2 = 2; + */ + public Builder setParam2(int value) { + bitField0_ |= 0x00000002; + param2_ = value; + onChanged(); + return this; + } + /** + * required int32 param2 = 2; + */ + public Builder clearParam2() { + bitField0_ = (bitField0_ & ~0x00000002); + param2_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.AddRequestProto) + } + + static { + defaultInstance = new AddRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.AddRequestProto) + } + + public interface AddRequestProto2OrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated int32 params = 1; + /** + * repeated int32 params = 1; + */ + java.util.List getParamsList(); + /** + * repeated int32 params = 1; + */ + int getParamsCount(); + /** + * repeated int32 params = 1; + */ + int getParams(int index); + } + /** + * Protobuf type {@code hadoop.common.AddRequestProto2} + */ + public static final class AddRequestProto2 extends + com.google.protobuf.GeneratedMessage + implements AddRequestProto2OrBuilder { + // Use AddRequestProto2.newBuilder() to construct. 
+ private AddRequestProto2(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddRequestProto2(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddRequestProto2 defaultInstance; + public static AddRequestProto2 getDefaultInstance() { + return defaultInstance; + } + + public AddRequestProto2 getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddRequestProto2( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + params_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + params_.add(input.readInt32()); + break; + } + case 10: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + params_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + while (input.getBytesUntilLimit() > 0) { + params_.add(input.readInt32()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + params_ = java.util.Collections.unmodifiableList(params_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddRequestProto2 parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddRequestProto2(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated int32 params = 1; + public static final 
int PARAMS_FIELD_NUMBER = 1; + private java.util.List params_; + /** + * repeated int32 params = 1; + */ + public java.util.List + getParamsList() { + return params_; + } + /** + * repeated int32 params = 1; + */ + public int getParamsCount() { + return params_.size(); + } + /** + * repeated int32 params = 1; + */ + public int getParams(int index) { + return params_.get(index); + } + + private void initFields() { + params_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < params_.size(); i++) { + output.writeInt32(1, params_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < params_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(params_.get(i)); + } + size += dataSize; + size += 1 * getParamsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2) obj; + + boolean result = true; + result = result && getParamsList() + .equals(other.getParamsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getParamsCount() > 0) { + hash = (37 * hash) + PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getParamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.AddRequestProto2} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2OrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public 
Builder clear() { + super.clear(); + params_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddRequestProto2_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + params_ = java.util.Collections.unmodifiableList(params_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.params_ = params_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.getDefaultInstance()) return this; + if (!other.params_.isEmpty()) { + if (params_.isEmpty()) { + params_ = other.params_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureParamsIsMutable(); + params_.addAll(other.params_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated int32 params = 1; + private java.util.List params_ = java.util.Collections.emptyList(); + private void ensureParamsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + params_ = new java.util.ArrayList(params_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated int32 params = 1; + */ + public java.util.List + getParamsList() { + return java.util.Collections.unmodifiableList(params_); + } + /** + * repeated int32 params = 1; + */ + public int getParamsCount() { + return params_.size(); + } + /** + * repeated int32 params = 1; + */ + public int getParams(int 
index) { + return params_.get(index); + } + /** + * repeated int32 params = 1; + */ + public Builder setParams( + int index, int value) { + ensureParamsIsMutable(); + params_.set(index, value); + onChanged(); + return this; + } + /** + * repeated int32 params = 1; + */ + public Builder addParams(int value) { + ensureParamsIsMutable(); + params_.add(value); + onChanged(); + return this; + } + /** + * repeated int32 params = 1; + */ + public Builder addAllParams( + java.lang.Iterable values) { + ensureParamsIsMutable(); + super.addAll(values, params_); + onChanged(); + return this; + } + /** + * repeated int32 params = 1; + */ + public Builder clearParams() { + params_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.AddRequestProto2) + } + + static { + defaultInstance = new AddRequestProto2(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.AddRequestProto2) + } + + public interface AddResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int32 result = 1; + /** + * required int32 result = 1; + */ + boolean hasResult(); + /** + * required int32 result = 1; + */ + int getResult(); + } + /** + * Protobuf type {@code hadoop.common.AddResponseProto} + */ + public static final class AddResponseProto extends + com.google.protobuf.GeneratedMessage + implements AddResponseProtoOrBuilder { + // Use AddResponseProto.newBuilder() to construct. + private AddResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddResponseProto defaultInstance; + public static AddResponseProto getDefaultInstance() { + return defaultInstance; + } + + public AddResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + result_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddResponseProto_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 result = 1; + public static final int RESULT_FIELD_NUMBER = 1; + private int result_; + /** + * required int32 result = 1; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 result = 1; + */ + public int getResult() { + return result_; + } + + private void initFields() { + result_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasResult()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, result_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, result_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto) obj; + + boolean result = true; + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && (getResult() + == other.getResult()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.AddResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + result_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AddResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.result_ = result_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance()) return this; + if (other.hasResult()) { + setResult(other.getResult()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasResult()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if 
(parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 result = 1; + private int result_ ; + /** + * required int32 result = 1; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 result = 1; + */ + public int getResult() { + return result_; + } + /** + * required int32 result = 1; + */ + public Builder setResult(int value) { + bitField0_ |= 0x00000001; + result_ = value; + onChanged(); + return this; + } + /** + * required int32 result = 1; + */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000001); + result_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.AddResponseProto) + } + + static { + defaultInstance = new AddResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.AddResponseProto) + } + + public interface ExchangeRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated int32 values = 1; + /** + * repeated int32 values = 1; + */ + java.util.List getValuesList(); + /** + * repeated int32 values = 1; + */ + int getValuesCount(); + /** + * repeated int32 values = 1; + */ + int getValues(int index); + } + /** + * Protobuf type {@code hadoop.common.ExchangeRequestProto} + */ + public static final class ExchangeRequestProto extends + com.google.protobuf.GeneratedMessage + implements ExchangeRequestProtoOrBuilder { + // Use ExchangeRequestProto.newBuilder() to construct. + private ExchangeRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ExchangeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ExchangeRequestProto defaultInstance; + public static ExchangeRequestProto getDefaultInstance() { + return defaultInstance; + } + + public ExchangeRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ExchangeRequestProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + values_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + values_.add(input.readInt32()); + break; + } + case 10: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + values_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + while (input.getBytesUntilLimit() > 0) { + values_.add(input.readInt32()); + } + input.popLimit(limit); + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + values_ = java.util.Collections.unmodifiableList(values_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ExchangeRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ExchangeRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated int32 values = 1; + public static final int VALUES_FIELD_NUMBER = 1; + private java.util.List values_; + /** + * repeated int32 values = 1; + */ + public java.util.List + getValuesList() { + return values_; + } + /** + * repeated int32 values = 1; + */ + public int getValuesCount() { + return values_.size(); + } + /** + * repeated int32 values = 1; + */ + public int getValues(int index) { + return values_.get(index); + } + + private void initFields() { + values_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < values_.size(); i++) { + output.writeInt32(1, values_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(values_.get(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto)) { + return super.equals(obj); + } + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto) obj; + + boolean result = true; + result = result && getValuesList() + .equals(other.getValuesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.ExchangeRequestProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeRequestProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeRequestProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + values_ = java.util.Collections.unmodifiableList(values_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.values_ = values_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto)other); + } else { + super.mergeFrom(other); + return 
this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.getDefaultInstance()) return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated int32 values = 1; + private java.util.List values_ = java.util.Collections.emptyList(); + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + values_ = new java.util.ArrayList(values_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated int32 values = 1; + */ + public java.util.List + getValuesList() { + return java.util.Collections.unmodifiableList(values_); + } + /** + * repeated int32 values = 1; + */ + public int getValuesCount() { + return values_.size(); + } + /** + * repeated int32 values = 1; + */ + public int getValues(int index) { + return values_.get(index); + } + /** + * repeated int32 values = 1; + */ + public Builder setValues( + int index, int value) { + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + return this; + } + /** + * repeated int32 values = 1; + */ + public Builder addValues(int value) { + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + return this; + } + /** + * repeated int32 values = 1; + */ + public Builder addAllValues( + java.lang.Iterable values) { + ensureValuesIsMutable(); + super.addAll(values, values_); + onChanged(); + return this; + } + /** + * repeated int32 values = 1; + */ + public Builder clearValues() { + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.ExchangeRequestProto) + } + + static { + defaultInstance = new ExchangeRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.ExchangeRequestProto) + } + + public interface ExchangeResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated int32 values = 1; + /** + * repeated int32 values = 1; + */ + java.util.List getValuesList(); + /** + * repeated int32 values = 1; + */ + int getValuesCount(); + /** + * repeated int32 values = 1; + */ + int getValues(int index); + } + /** + * Protobuf type {@code hadoop.common.ExchangeResponseProto} + */ + public static final class ExchangeResponseProto extends + com.google.protobuf.GeneratedMessage + implements ExchangeResponseProtoOrBuilder { + // Use ExchangeResponseProto.newBuilder() to construct. 
+ private ExchangeResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ExchangeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ExchangeResponseProto defaultInstance; + public static ExchangeResponseProto getDefaultInstance() { + return defaultInstance; + } + + public ExchangeResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ExchangeResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + values_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + values_.add(input.readInt32()); + break; + } + case 10: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + values_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + while (input.getBytesUntilLimit() > 0) { + values_.add(input.readInt32()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + values_ = java.util.Collections.unmodifiableList(values_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ExchangeResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ExchangeResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + 
} + + // repeated int32 values = 1; + public static final int VALUES_FIELD_NUMBER = 1; + private java.util.List values_; + /** + * repeated int32 values = 1; + */ + public java.util.List + getValuesList() { + return values_; + } + /** + * repeated int32 values = 1; + */ + public int getValuesCount() { + return values_.size(); + } + /** + * repeated int32 values = 1; + */ + public int getValues(int index) { + return values_.get(index); + } + + private void initFields() { + values_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < values_.size(); i++) { + output.writeInt32(1, values_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt32SizeNoTag(values_.get(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto) obj; + + boolean result = true; + result = result && getValuesList() + .equals(other.getValuesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.ExchangeResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_ExchangeResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + values_ = java.util.Collections.unmodifiableList(values_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.values_ = values_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance()) return this; + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated int32 values = 1; + private java.util.List values_ = java.util.Collections.emptyList(); + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + values_ = new java.util.ArrayList(values_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated int32 values = 1; + */ + public 
java.util.List + getValuesList() { + return java.util.Collections.unmodifiableList(values_); + } + /** + * repeated int32 values = 1; + */ + public int getValuesCount() { + return values_.size(); + } + /** + * repeated int32 values = 1; + */ + public int getValues(int index) { + return values_.get(index); + } + /** + * repeated int32 values = 1; + */ + public Builder setValues( + int index, int value) { + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + return this; + } + /** + * repeated int32 values = 1; + */ + public Builder addValues(int value) { + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + return this; + } + /** + * repeated int32 values = 1; + */ + public Builder addAllValues( + java.lang.Iterable values) { + ensureValuesIsMutable(); + super.addAll(values, values_); + onChanged(); + return this; + } + /** + * repeated int32 values = 1; + */ + public Builder clearValues() { + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.ExchangeResponseProto) + } + + static { + defaultInstance = new ExchangeResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.ExchangeResponseProto) + } + + public interface AuthMethodResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int32 code = 1; + /** + * required int32 code = 1; + */ + boolean hasCode(); + /** + * required int32 code = 1; + */ + int getCode(); + + // required string mechanismName = 2; + /** + * required string mechanismName = 2; + */ + boolean hasMechanismName(); + /** + * required string mechanismName = 2; + */ + java.lang.String getMechanismName(); + /** + * required string mechanismName = 2; + */ + com.google.protobuf.ByteString + getMechanismNameBytes(); + } + /** + * Protobuf type {@code hadoop.common.AuthMethodResponseProto} + */ + public static final class AuthMethodResponseProto extends + com.google.protobuf.GeneratedMessage + implements AuthMethodResponseProtoOrBuilder { + // Use AuthMethodResponseProto.newBuilder() to construct. 
+ private AuthMethodResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AuthMethodResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AuthMethodResponseProto defaultInstance; + public static AuthMethodResponseProto getDefaultInstance() { + return defaultInstance; + } + + public AuthMethodResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AuthMethodResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + code_ = input.readInt32(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + mechanismName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AuthMethodResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AuthMethodResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AuthMethodResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AuthMethodResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 code = 1; + public static final int CODE_FIELD_NUMBER = 1; + private int code_; + /** + * required int32 code = 1; + */ + public boolean hasCode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 code = 1; + */ + public int getCode() { + return code_; + } + + // required string mechanismName = 2; + public static final int MECHANISMNAME_FIELD_NUMBER = 2; + private java.lang.Object mechanismName_; + /** + * required string 
mechanismName = 2; + */ + public boolean hasMechanismName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string mechanismName = 2; + */ + public java.lang.String getMechanismName() { + java.lang.Object ref = mechanismName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + mechanismName_ = s; + } + return s; + } + } + /** + * required string mechanismName = 2; + */ + public com.google.protobuf.ByteString + getMechanismNameBytes() { + java.lang.Object ref = mechanismName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + mechanismName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + code_ = 0; + mechanismName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMechanismName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, code_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getMechanismNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, code_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getMechanismNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto) obj; + + boolean result = true; + result = result && (hasCode() == other.hasCode()); + if (hasCode()) { + result = result && (getCode() + == other.getCode()); + } + result = result && (hasMechanismName() == other.hasMechanismName()); + if (hasMechanismName()) { + result = result && getMechanismName() + .equals(other.getMechanismName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + 
getDescriptorForType().hashCode(); + if (hasCode()) { + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + getCode(); + } + if (hasMechanismName()) { + hash = (37 * hash) + MECHANISMNAME_FIELD_NUMBER; + hash = (53 * hash) + getMechanismName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.AuthMethodResponseProto} + */ + public 
static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AuthMethodResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AuthMethodResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + code_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + mechanismName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_AuthMethodResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.code_ = code_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mechanismName_ = mechanismName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance()) return this; + if 
(other.hasCode()) { + setCode(other.getCode()); + } + if (other.hasMechanismName()) { + bitField0_ |= 0x00000002; + mechanismName_ = other.mechanismName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCode()) { + + return false; + } + if (!hasMechanismName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 code = 1; + private int code_ ; + /** + * required int32 code = 1; + */ + public boolean hasCode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 code = 1; + */ + public int getCode() { + return code_; + } + /** + * required int32 code = 1; + */ + public Builder setCode(int value) { + bitField0_ |= 0x00000001; + code_ = value; + onChanged(); + return this; + } + /** + * required int32 code = 1; + */ + public Builder clearCode() { + bitField0_ = (bitField0_ & ~0x00000001); + code_ = 0; + onChanged(); + return this; + } + + // required string mechanismName = 2; + private java.lang.Object mechanismName_ = ""; + /** + * required string mechanismName = 2; + */ + public boolean hasMechanismName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string mechanismName = 2; + */ + public java.lang.String getMechanismName() { + java.lang.Object ref = mechanismName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + mechanismName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string mechanismName = 2; + */ + public com.google.protobuf.ByteString + getMechanismNameBytes() { + java.lang.Object ref = mechanismName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + mechanismName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string mechanismName = 2; + */ + public Builder setMechanismName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + mechanismName_ = value; + onChanged(); + return this; + } + /** + * required string mechanismName = 2; + */ + public Builder clearMechanismName() { + bitField0_ = (bitField0_ & ~0x00000002); + mechanismName_ = getDefaultInstance().getMechanismName(); + onChanged(); + return this; + } + /** + * required string mechanismName = 2; + */ + public Builder setMechanismNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + mechanismName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.AuthMethodResponseProto) + } + + static { + defaultInstance = new 
AuthMethodResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.AuthMethodResponseProto) + } + + public interface UserResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string user = 1; + /** + * required string user = 1; + */ + boolean hasUser(); + /** + * required string user = 1; + */ + java.lang.String getUser(); + /** + * required string user = 1; + */ + com.google.protobuf.ByteString + getUserBytes(); + } + /** + * Protobuf type {@code hadoop.common.UserResponseProto} + */ + public static final class UserResponseProto extends + com.google.protobuf.GeneratedMessage + implements UserResponseProtoOrBuilder { + // Use UserResponseProto.newBuilder() to construct. + private UserResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UserResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UserResponseProto defaultInstance; + public static UserResponseProto getDefaultInstance() { + return defaultInstance; + } + + public UserResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UserResponseProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + user_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_UserResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_UserResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UserResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UserResponseProto(input, extensionRegistry); + } + }; 
+ + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string user = 1; + public static final int USER_FIELD_NUMBER = 1; + private java.lang.Object user_; + /** + * required string user = 1; + */ + public boolean hasUser() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string user = 1; + */ + public java.lang.String getUser() { + java.lang.Object ref = user_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + user_ = s; + } + return s; + } + } + /** + * required string user = 1; + */ + public com.google.protobuf.ByteString + getUserBytes() { + java.lang.Object ref = user_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + user_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + user_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUser()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getUserBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto) obj; + + boolean result = true; + result = result && (hasUser() == other.hasUser()); + if (hasUser()) { + result = result && getUser() + .equals(other.getUser()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUser()) { + hash = (37 * hash) + USER_FIELD_NUMBER; + hash = (53 * hash) + getUser().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom( + com.google.protobuf.ByteString 
data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.UserResponseProto} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_UserResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_UserResponseProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + user_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_UserResponseProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.user_ = user_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance()) return this; + if (other.hasUser()) { + bitField0_ |= 0x00000001; + user_ = other.user_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUser()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto) e.getUnfinishedMessage(); + throw e; + } finally { + if 
(parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string user = 1; + private java.lang.Object user_ = ""; + /** + * required string user = 1; + */ + public boolean hasUser() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string user = 1; + */ + public java.lang.String getUser() { + java.lang.Object ref = user_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + user_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string user = 1; + */ + public com.google.protobuf.ByteString + getUserBytes() { + java.lang.Object ref = user_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + user_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string user = 1; + */ + public Builder setUser( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + user_ = value; + onChanged(); + return this; + } + /** + * required string user = 1; + */ + public Builder clearUser() { + bitField0_ = (bitField0_ & ~0x00000001); + user_ = getDefaultInstance().getUser(); + onChanged(); + return this; + } + /** + * required string user = 1; + */ + public Builder setUserBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + user_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.UserResponseProto) + } + + static { + defaultInstance = new UserResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.UserResponseProto) + } + + public interface SleepRequestProto2OrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 sleep_time = 1; + /** + * optional int64 sleep_time = 1; + */ + boolean hasSleepTime(); + /** + * optional int64 sleep_time = 1; + */ + long getSleepTime(); + } + /** + * Protobuf type {@code hadoop.common.SleepRequestProto2} + */ + public static final class SleepRequestProto2 extends + com.google.protobuf.GeneratedMessage + implements SleepRequestProto2OrBuilder { + // Use SleepRequestProto2.newBuilder() to construct. 
+ private SleepRequestProto2(com.google.protobuf.GeneratedMessage.Builder builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SleepRequestProto2(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SleepRequestProto2 defaultInstance;
+ public static SleepRequestProto2 getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SleepRequestProto2 getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SleepRequestProto2(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ sleepTime_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto2_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto2_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public SleepRequestProto2 parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SleepRequestProto2(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional int64 sleep_time = 1;
+ public static final int SLEEP_TIME_FIELD_NUMBER = 1;
+ private long sleepTime_;
+ /**
+ * optional int64 sleep_time = 1;
+ */
+ public boolean hasSleepTime() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional int64 sleep_time = 1;
+ */
+ public long getSleepTime() {
+ return sleepTime_;
+ }
+
+ private void initFields() {
+ sleepTime_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, sleepTime_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, sleepTime_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2) obj; + + boolean result = true; + result = result && (hasSleepTime() == other.hasSleepTime()); + if (hasSleepTime()) { + result = result && (getSleepTime() + == other.getSleepTime()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSleepTime()) { + hash = (37 * hash) + SLEEP_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSleepTime()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.SleepRequestProto2} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2OrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + sleepTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepRequestProto2_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.getDefaultInstance(); + } + + public 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.sleepTime_ = sleepTime_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.getDefaultInstance()) return this; + if (other.hasSleepTime()) { + setSleepTime(other.getSleepTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 sleep_time = 1; + private long sleepTime_ ; + /** + * optional int64 sleep_time = 1; + */ + public boolean hasSleepTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 sleep_time = 1; + */ + public long getSleepTime() { + return sleepTime_; + } + /** + * optional int64 sleep_time = 1; + */ + public Builder setSleepTime(long value) { + bitField0_ |= 0x00000001; + sleepTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 sleep_time = 1; + */ + public Builder clearSleepTime() { + bitField0_ = (bitField0_ & ~0x00000001); + sleepTime_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.SleepRequestProto2) + } + + static { + defaultInstance = new SleepRequestProto2(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.SleepRequestProto2) + } + + public interface SleepResponseProto2OrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 receive_time = 1; + /** + * optional int64 receive_time = 1; + */ + boolean hasReceiveTime(); + /** + * optional int64 receive_time = 1; + */ + long getReceiveTime(); + + // optional int64 response_time = 2; + /** + * optional int64 response_time = 2; + */ + boolean hasResponseTime(); + /** + * optional int64 response_time = 
2; + */ + long getResponseTime(); + } + /** + * Protobuf type {@code hadoop.common.SleepResponseProto2} + */ + public static final class SleepResponseProto2 extends + com.google.protobuf.GeneratedMessage + implements SleepResponseProto2OrBuilder { + // Use SleepResponseProto2.newBuilder() to construct. + private SleepResponseProto2(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SleepResponseProto2(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SleepResponseProto2 defaultInstance; + public static SleepResponseProto2 getDefaultInstance() { + return defaultInstance; + } + + public SleepResponseProto2 getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SleepResponseProto2( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + receiveTime_ = input.readInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + responseTime_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SleepResponseProto2 parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SleepResponseProto2(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 receive_time = 1; + public static final int RECEIVE_TIME_FIELD_NUMBER = 1; + private long receiveTime_; + /** + * optional int64 receive_time = 1; + */ + public boolean hasReceiveTime() { + return 
((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 receive_time = 1; + */ + public long getReceiveTime() { + return receiveTime_; + } + + // optional int64 response_time = 2; + public static final int RESPONSE_TIME_FIELD_NUMBER = 2; + private long responseTime_; + /** + * optional int64 response_time = 2; + */ + public boolean hasResponseTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 response_time = 2; + */ + public long getResponseTime() { + return responseTime_; + } + + private void initFields() { + receiveTime_ = 0L; + responseTime_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, receiveTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, responseTime_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, receiveTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, responseTime_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 other = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2) obj; + + boolean result = true; + result = result && (hasReceiveTime() == other.hasReceiveTime()); + if (hasReceiveTime()) { + result = result && (getReceiveTime() + == other.getReceiveTime()); + } + result = result && (hasResponseTime() == other.hasResponseTime()); + if (hasResponseTime()) { + result = result && (getResponseTime() + == other.getResponseTime()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasReceiveTime()) { + hash = (37 * hash) + RECEIVE_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReceiveTime()); + } + if (hasResponseTime()) { + hash = (37 * hash) + RESPONSE_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getResponseTime()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.SleepResponseProto2} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2OrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto2_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto2_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.class, org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + receiveTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + responseTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.internal_static_hadoop_common_SleepResponseProto2_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 build() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 buildPartial() { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 result = new org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.receiveTime_ = receiveTime_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.responseTime_ = responseTime_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2) { + return mergeFrom((org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 other) { + if (other == org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance()) return this; + if (other.hasReceiveTime()) { + setReceiveTime(other.getReceiveTime()); + } + if (other.hasResponseTime()) { + setResponseTime(other.getResponseTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 parsedMessage = null; + try { + 
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 receive_time = 1; + private long receiveTime_ ; + /** + * optional int64 receive_time = 1; + */ + public boolean hasReceiveTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 receive_time = 1; + */ + public long getReceiveTime() { + return receiveTime_; + } + /** + * optional int64 receive_time = 1; + */ + public Builder setReceiveTime(long value) { + bitField0_ |= 0x00000001; + receiveTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 receive_time = 1; + */ + public Builder clearReceiveTime() { + bitField0_ = (bitField0_ & ~0x00000001); + receiveTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 response_time = 2; + private long responseTime_ ; + /** + * optional int64 response_time = 2; + */ + public boolean hasResponseTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 response_time = 2; + */ + public long getResponseTime() { + return responseTime_; + } + /** + * optional int64 response_time = 2; + */ + public Builder setResponseTime(long value) { + bitField0_ |= 0x00000002; + responseTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 response_time = 2; + */ + public Builder clearResponseTime() { + bitField0_ = (bitField0_ & ~0x00000002); + responseTime_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.SleepResponseProto2) + } + + static { + defaultInstance = new SleepResponseProto2(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.SleepResponseProto2) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_EmptyRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_EmptyRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_EmptyResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_EmptyResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_EchoRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_EchoRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_EchoResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_EchoResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_OptRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_OptRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_OptResponseProto_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_OptResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_SleepRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_SleepRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_SleepResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_SleepResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_SlowPingRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_SlowPingRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_EchoRequestProto2_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_EchoRequestProto2_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_EchoResponseProto2_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_EchoResponseProto2_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_AddRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_AddRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_AddRequestProto2_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_AddRequestProto2_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_AddResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_AddResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_ExchangeRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_ExchangeRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_ExchangeResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_ExchangeResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_AuthMethodResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_AuthMethodResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_UserResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_UserResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_SleepRequestProto2_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable 
+ internal_static_hadoop_common_SleepRequestProto2_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_SleepResponseProto2_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_SleepResponseProto2_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\021test_legacy.proto\022\rhadoop.common\"\023\n\021Em" + + "ptyRequestProto\"\024\n\022EmptyResponseProto\"#\n" + + "\020EchoRequestProto\022\017\n\007message\030\001 \002(\t\"$\n\021Ec" + + "hoResponseProto\022\017\n\007message\030\001 \002(\t\"\"\n\017OptR" + + "equestProto\022\017\n\007message\030\001 \001(\t\"#\n\020OptRespo" + + "nseProto\022\017\n\007message\030\001 \001(\t\")\n\021SleepReques" + + "tProto\022\024\n\014milliSeconds\030\001 \002(\005\"\024\n\022SleepRes" + + "ponseProto\"*\n\024SlowPingRequestProto\022\022\n\nsh" + + "ouldSlow\030\001 \002(\010\"$\n\021EchoRequestProto2\022\017\n\007m" + + "essage\030\001 \003(\t\"%\n\022EchoResponseProto2\022\017\n\007me", + "ssage\030\001 \003(\t\"1\n\017AddRequestProto\022\016\n\006param1" + + "\030\001 \002(\005\022\016\n\006param2\030\002 \002(\005\"\"\n\020AddRequestProt" + + "o2\022\016\n\006params\030\001 \003(\005\"\"\n\020AddResponseProto\022\016" + + "\n\006result\030\001 \002(\005\"&\n\024ExchangeRequestProto\022\016" + + "\n\006values\030\001 \003(\005\"\'\n\025ExchangeResponseProto\022" + + "\016\n\006values\030\001 \003(\005\">\n\027AuthMethodResponsePro" + + "to\022\014\n\004code\030\001 \002(\005\022\025\n\rmechanismName\030\002 \002(\t\"" + + "!\n\021UserResponseProto\022\014\n\004user\030\001 \002(\t\"(\n\022Sl" + + "eepRequestProto2\022\022\n\nsleep_time\030\001 \001(\003\"B\n\023" + + "SleepResponseProto2\022\024\n\014receive_time\030\001 \001(", + "\003\022\025\n\rresponse_time\030\002 \001(\003B5\n\036org.apache.h" + + "adoop.ipc.protobufB\020TestProtosLegacy\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hadoop_common_EmptyRequestProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hadoop_common_EmptyRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_EmptyRequestProto_descriptor, + new java.lang.String[] { }); + internal_static_hadoop_common_EmptyResponseProto_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hadoop_common_EmptyResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_EmptyResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_hadoop_common_EchoRequestProto_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_hadoop_common_EchoRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_EchoRequestProto_descriptor, + new java.lang.String[] { "Message", }); + 
internal_static_hadoop_common_EchoResponseProto_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_hadoop_common_EchoResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_EchoResponseProto_descriptor, + new java.lang.String[] { "Message", }); + internal_static_hadoop_common_OptRequestProto_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_hadoop_common_OptRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_OptRequestProto_descriptor, + new java.lang.String[] { "Message", }); + internal_static_hadoop_common_OptResponseProto_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hadoop_common_OptResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_OptResponseProto_descriptor, + new java.lang.String[] { "Message", }); + internal_static_hadoop_common_SleepRequestProto_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hadoop_common_SleepRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_SleepRequestProto_descriptor, + new java.lang.String[] { "MilliSeconds", }); + internal_static_hadoop_common_SleepResponseProto_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_hadoop_common_SleepResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_SleepResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_hadoop_common_SlowPingRequestProto_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_hadoop_common_SlowPingRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_SlowPingRequestProto_descriptor, + new java.lang.String[] { "ShouldSlow", }); + internal_static_hadoop_common_EchoRequestProto2_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_hadoop_common_EchoRequestProto2_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_EchoRequestProto2_descriptor, + new java.lang.String[] { "Message", }); + internal_static_hadoop_common_EchoResponseProto2_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_hadoop_common_EchoResponseProto2_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_EchoResponseProto2_descriptor, + new java.lang.String[] { "Message", }); + internal_static_hadoop_common_AddRequestProto_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_hadoop_common_AddRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_AddRequestProto_descriptor, + new java.lang.String[] { "Param1", "Param2", }); + internal_static_hadoop_common_AddRequestProto2_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_hadoop_common_AddRequestProto2_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_AddRequestProto2_descriptor, + new java.lang.String[] { "Params", }); + internal_static_hadoop_common_AddResponseProto_descriptor = + 
getDescriptor().getMessageTypes().get(13); + internal_static_hadoop_common_AddResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_AddResponseProto_descriptor, + new java.lang.String[] { "Result", }); + internal_static_hadoop_common_ExchangeRequestProto_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hadoop_common_ExchangeRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_ExchangeRequestProto_descriptor, + new java.lang.String[] { "Values", }); + internal_static_hadoop_common_ExchangeResponseProto_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_hadoop_common_ExchangeResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_ExchangeResponseProto_descriptor, + new java.lang.String[] { "Values", }); + internal_static_hadoop_common_AuthMethodResponseProto_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_hadoop_common_AuthMethodResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_AuthMethodResponseProto_descriptor, + new java.lang.String[] { "Code", "MechanismName", }); + internal_static_hadoop_common_UserResponseProto_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_hadoop_common_UserResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_UserResponseProto_descriptor, + new java.lang.String[] { "User", }); + internal_static_hadoop_common_SleepRequestProto2_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_hadoop_common_SleepRequestProto2_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_SleepRequestProto2_descriptor, + new java.lang.String[] { "SleepTime", }); + internal_static_hadoop_common_SleepResponseProto2_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_hadoop_common_SleepResponseProto2_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_SleepResponseProto2_descriptor, + new java.lang.String[] { "ReceiveTime", "ResponseTime", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestRpcServiceProtosLegacy.java b/hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestRpcServiceProtosLegacy.java new file mode 100644 index 0000000000000..26cef9c75523c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/arm-java/org/apache/hadoop/ipc/protobuf/TestRpcServiceProtosLegacy.java @@ -0,0 +1,3313 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// This class is added to the source because the arm protoc 2.5.0 executable +// is not available to generate the same code. +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: test_rpc_service_legacy.proto + +package org.apache.hadoop.ipc.protobuf; + +public final class TestRpcServiceProtosLegacy { + private TestRpcServiceProtosLegacy() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf service {@code hadoop.common.TestProtobufRpcProto} + * + * <pre>
    +   **
    +   * A protobuf service for use in tests
+   * </pre>
    + */ + public static abstract class TestProtobufRpcProto + implements com.google.protobuf.Service { + protected TestProtobufRpcProto() {} + + public interface Interface { + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.EchoRequestProto) returns (.hadoop.common.EchoResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc error(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void error( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc error2(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void error2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc slowPing(.hadoop.common.SlowPingRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void slowPing( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo2(.hadoop.common.EchoRequestProto2) returns (.hadoop.common.EchoResponseProto2); + */ + public abstract void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 request, + com.google.protobuf.RpcCallback done); + + /** + * rpc add(.hadoop.common.AddRequestProto) returns (.hadoop.common.AddResponseProto); + */ + public abstract void add( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc add2(.hadoop.common.AddRequestProto2) returns (.hadoop.common.AddResponseProto); + */ + public abstract void add2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 request, + com.google.protobuf.RpcCallback done); + + /** + * rpc testServerGet(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void testServerGet( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc exchange(.hadoop.common.ExchangeRequestProto) returns (.hadoop.common.ExchangeResponseProto); + */ + public abstract void exchange( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc sleep(.hadoop.common.SleepRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc 
lockAndSleep(.hadoop.common.SleepRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void lockAndSleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getAuthMethod(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.AuthMethodResponseProto); + */ + public abstract void getAuthMethod( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getAuthUser(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.UserResponseProto); + */ + public abstract void getAuthUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echoPostponed(.hadoop.common.EchoRequestProto) returns (.hadoop.common.EchoResponseProto); + */ + public abstract void echoPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc sendPostponed(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void sendPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getCurrentUser(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.UserResponseProto); + */ + public abstract void getCurrentUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getServerRemoteUser(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.UserResponseProto); + */ + public abstract void getServerRemoteUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new TestProtobufRpcProto() { + @java.lang.Override + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.ping(controller, request, done); + } + + @java.lang.Override + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.echo(controller, request, done); + } + + @java.lang.Override + public void error( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.error(controller, request, done); + } + + @java.lang.Override + public void error2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.error2(controller, request, done); + } + + @java.lang.Override + public void slowPing( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto request, 
+ com.google.protobuf.RpcCallback done) { + impl.slowPing(controller, request, done); + } + + @java.lang.Override + public void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 request, + com.google.protobuf.RpcCallback done) { + impl.echo2(controller, request, done); + } + + @java.lang.Override + public void add( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.add(controller, request, done); + } + + @java.lang.Override + public void add2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 request, + com.google.protobuf.RpcCallback done) { + impl.add2(controller, request, done); + } + + @java.lang.Override + public void testServerGet( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.testServerGet(controller, request, done); + } + + @java.lang.Override + public void exchange( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.exchange(controller, request, done); + } + + @java.lang.Override + public void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.sleep(controller, request, done); + } + + @java.lang.Override + public void lockAndSleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.lockAndSleep(controller, request, done); + } + + @java.lang.Override + public void getAuthMethod( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getAuthMethod(controller, request, done); + } + + @java.lang.Override + public void getAuthUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getAuthUser(controller, request, done); + } + + @java.lang.Override + public void echoPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.echoPostponed(controller, request, done); + } + + @java.lang.Override + public void sendPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.sendPostponed(controller, request, done); + } + + @java.lang.Override + public void getCurrentUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getCurrentUser(controller, request, done); + } + + @java.lang.Override + public void getServerRemoteUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getServerRemoteUser(controller, request, 
done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 1: + return impl.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)request); + case 2: + return impl.error(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 3: + return impl.error2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 4: + return impl.slowPing(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto)request); + case 5: + return impl.echo2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2)request); + case 6: + return impl.add(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto)request); + case 7: + return impl.add2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2)request); + case 8: + return impl.testServerGet(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 9: + return impl.exchange(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto)request); + case 10: + return impl.sleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)request); + case 11: + return impl.lockAndSleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)request); + case 12: + return impl.getAuthMethod(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 13: + return impl.getAuthUser(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 14: + return impl.echoPostponed(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)request); + case 15: + return impl.sendPostponed(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 16: + return impl.getCurrentUser(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 17: + return impl.getServerRemoteUser(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 
1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.getDefaultInstance(); + case 6: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.getDefaultInstance(); + case 8: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 9: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.getDefaultInstance(); + case 10: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + case 11: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + case 12: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 13: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 14: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + case 15: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 16: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 17: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance(); + case 6: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(); + case 8: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 9: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance(); + case 10: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 11: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 
12: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance(); + case 13: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + case 14: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + case 15: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 16: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + case 17: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.EchoRequestProto) returns (.hadoop.common.EchoResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc error(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void error( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc error2(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void error2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc slowPing(.hadoop.common.SlowPingRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void slowPing( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo2(.hadoop.common.EchoRequestProto2) returns (.hadoop.common.EchoResponseProto2); + */ + public abstract void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 request, + com.google.protobuf.RpcCallback done); + + /** + * rpc add(.hadoop.common.AddRequestProto) returns (.hadoop.common.AddResponseProto); + */ + public abstract void add( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc add2(.hadoop.common.AddRequestProto2) returns (.hadoop.common.AddResponseProto); + */ + public abstract void add2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 request, + com.google.protobuf.RpcCallback done); + + /** + * rpc testServerGet(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void testServerGet( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc exchange(.hadoop.common.ExchangeRequestProto) returns 
(.hadoop.common.ExchangeResponseProto); + */ + public abstract void exchange( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc sleep(.hadoop.common.SleepRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc lockAndSleep(.hadoop.common.SleepRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void lockAndSleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getAuthMethod(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.AuthMethodResponseProto); + */ + public abstract void getAuthMethod( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getAuthUser(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.UserResponseProto); + */ + public abstract void getAuthUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echoPostponed(.hadoop.common.EchoRequestProto) returns (.hadoop.common.EchoResponseProto); + */ + public abstract void echoPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc sendPostponed(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void sendPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getCurrentUser(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.UserResponseProto); + */ + public abstract void getCurrentUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc getServerRemoteUser(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.UserResponseProto); + */ + public abstract void getServerRemoteUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + 
switch(method.getIndex()) { + case 0: + this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.error(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.error2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 4: + this.slowPing(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: + this.echo2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.add(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 7: + this.add2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 8: + this.testServerGet(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 9: + this.exchange(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 10: + this.sleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 11: + this.lockAndSleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 12: + this.getAuthMethod(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 13: + this.getAuthUser(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 14: + this.echoPostponed(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 15: + this.sendPostponed(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 16: + this.getCurrentUser(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 17: + this.getServerRemoteUser(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + 
com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2.getDefaultInstance(); + case 6: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2.getDefaultInstance(); + case 8: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 9: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto.getDefaultInstance(); + case 10: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + case 11: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + case 12: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 13: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 14: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + case 15: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 16: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 17: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance(); + case 6: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(); + case 8: + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 9: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance(); + case 10: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 11: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 12: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance(); + case 13: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + case 14: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + case 15: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 16: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + case 17: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.TestProtobufRpcProto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance())); + } + + public void error( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void error2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto 
request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void slowPing( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance())); + } + + public void add( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance())); + } + + public void add2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance())); + } + + public void testServerGet( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void exchange( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(9), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance())); + } + + public void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(10), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void lockAndSleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(11), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void getAuthMethod( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(12), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance())); + } + + public void getAuthUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(13), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance())); + } + + public void echoPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.class, + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance())); + } + + public void sendPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(15), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void getCurrentUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(16), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance())); + } + + public void getServerRemoteUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto error( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto error2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto slowPing( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 echo2( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto add( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto add2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto testServerGet( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto exchange( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto lockAndSleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto getAuthMethod( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getAuthUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto echoPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto sendPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getCurrentUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getServerRemoteUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto error( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto error2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto slowPing( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SlowPingRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2 echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto2 request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto2.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto add( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, 
+ request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto add2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddRequestProto2 request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AddResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto testServerGet( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto exchange( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(9), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.ExchangeResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(10), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto lockAndSleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(11), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto getAuthMethod( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(12), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.AuthMethodResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getAuthUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws 
com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(13), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto echoPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(14), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto sendPostponed( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(15), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getCurrentUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(16), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto getServerRemoteUser( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.UserResponseProto.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.TestProtobufRpcProto) + } + + /** + * Protobuf service {@code hadoop.common.TestProtobufRpc2Proto} + */ + public static abstract class TestProtobufRpc2Proto + implements com.google.protobuf.Service { + protected TestProtobufRpc2Proto() {} + + public interface Interface { + /** + * rpc ping2(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo2(.hadoop.common.EchoRequestProto) returns (.hadoop.common.EchoResponseProto); + */ + public abstract void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc sleep(.hadoop.common.SleepRequestProto) returns (.hadoop.common.SleepResponseProto); + */ + public abstract void sleep( + com.google.protobuf.RpcController 
controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new TestProtobufRpc2Proto() { + @java.lang.Override + public void ping2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.ping2(controller, request, done); + } + + @java.lang.Override + public void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.echo2(controller, request, done); + } + + @java.lang.Override + public void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.sleep(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.ping2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 1: + return impl.echo2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)request); + case 2: + return impl.sleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ping2(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo2(.hadoop.common.EchoRequestProto) returns (.hadoop.common.EchoResponseProto); + */ + public abstract void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc sleep(.hadoop.common.SleepRequestProto) returns (.hadoop.common.SleepResponseProto); + */ + public abstract void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(1); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.ping2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.echo2(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.sleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new 
java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.TestProtobufRpc2Proto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void ping2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance())); + } + + public void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException; + + public 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto echo2( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EchoResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.TestProtobufRpc2Proto) + } + + /** + * Protobuf service {@code hadoop.common.OldProtobufRpcProto} + */ + public static abstract class OldProtobufRpcProto + implements com.google.protobuf.Service { + protected OldProtobufRpcProto() {} + + public interface Interface { + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new OldProtobufRpcProto() { + @java.lang.Override + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.ping(controller, request, done); + } + + @java.lang.Override + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, 
+ com.google.protobuf.RpcCallback done) { + impl.echo(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 1: + return impl.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(2); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + 
com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.OldProtobufRpcProto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.OldProtobufRpcProto) + } + + /** + * Protobuf service {@code hadoop.common.NewProtobufRpcProto} + */ + public static abstract class NewProtobufRpcProto + implements com.google.protobuf.Service { + protected NewProtobufRpcProto() {} + + public interface Interface { + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.OptRequestProto) returns (.hadoop.common.OptResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new NewProtobufRpcProto() { + @java.lang.Override + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.ping(controller, request, done); + } + + @java.lang.Override + public void echo( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.echo(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 1: + return impl.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.OptRequestProto) returns (.hadoop.common.OptResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(3); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + 
com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.NewProtobufRpcProto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + 
org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.OptResponseProto.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.NewProtobufRpcProto) + } + + /** + * Protobuf service {@code hadoop.common.NewerProtobufRpcProto} + */ + public static abstract class NewerProtobufRpcProto + implements com.google.protobuf.Service { + protected NewerProtobufRpcProto() {} + + public interface Interface { + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new NewerProtobufRpcProto() { + @java.lang.Override + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + 
com.google.protobuf.RpcCallback done) { + impl.ping(controller, request, done); + } + + @java.lang.Override + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.echo(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + case 1: + return impl.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + /** + * rpc echo(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(4); + } + public final 
com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.NewerProtobufRpcProto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + + public void echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) 
{ + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto echo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.NewerProtobufRpcProto) + } + + /** + * Protobuf service {@code hadoop.common.CustomProto} + */ + public static abstract class CustomProto + implements com.google.protobuf.Service { + protected CustomProto() {} + + public interface Interface { + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new CustomProto() { + @java.lang.Override + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.ping(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new 
com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ping(.hadoop.common.EmptyRequestProto) returns (.hadoop.common.EmptyResponseProto); + */ + public abstract void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(5); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor 
for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.CustomProto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto ping( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.EmptyResponseProto.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.CustomProto) + } + + /** + * Protobuf service {@code hadoop.common.TestProtobufRpcHandoffProto} + */ + public static abstract class TestProtobufRpcHandoffProto + implements com.google.protobuf.Service { + protected TestProtobufRpcHandoffProto() {} + + public interface Interface { + /** + * rpc sleep(.hadoop.common.SleepRequestProto2) returns (.hadoop.common.SleepResponseProto2); + */ + public abstract void sleep( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new TestProtobufRpcHandoffProto() { + @java.lang.Override + public void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request, + com.google.protobuf.RpcCallback done) { + impl.sleep(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.sleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc sleep(.hadoop.common.SleepRequestProto2) returns (.hadoop.common.SleepResponseProto2); + */ + public abstract void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.getDescriptor().getServices().get(6); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != 
getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.sleep(controller, (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy.TestProtobufRpcHandoffProto implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.class, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2 sleep( + com.google.protobuf.RpcController controller, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepRequestProto2 request) + throws com.google.protobuf.ServiceException { + 
return (org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.SleepResponseProto2.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hadoop.common.TestProtobufRpcHandoffProto) + } + + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\035test_rpc_service_legacy.proto\022\rhadoop." + + "common\032\021test_legacy.proto2\330\013\n\024TestProtob" + + "ufRpcProto\022K\n\004ping\022 .hadoop.common.Empty" + + "RequestProto\032!.hadoop.common.EmptyRespon" + + "seProto\022I\n\004echo\022\037.hadoop.common.EchoRequ" + + "estProto\032 .hadoop.common.EchoResponsePro" + + "to\022L\n\005error\022 .hadoop.common.EmptyRequest" + + "Proto\032!.hadoop.common.EmptyResponseProto" + + "\022M\n\006error2\022 .hadoop.common.EmptyRequestP" + + "roto\032!.hadoop.common.EmptyResponseProto\022", + "R\n\010slowPing\022#.hadoop.common.SlowPingRequ" + + "estProto\032!.hadoop.common.EmptyResponsePr" + + "oto\022L\n\005echo2\022 .hadoop.common.EchoRequest" + + "Proto2\032!.hadoop.common.EchoResponseProto" + + "2\022F\n\003add\022\036.hadoop.common.AddRequestProto" + + "\032\037.hadoop.common.AddResponseProto\022H\n\004add" + + "2\022\037.hadoop.common.AddRequestProto2\032\037.had" + + "oop.common.AddResponseProto\022T\n\rtestServe" + + "rGet\022 .hadoop.common.EmptyRequestProto\032!" + + ".hadoop.common.EmptyResponseProto\022U\n\010exc", + "hange\022#.hadoop.common.ExchangeRequestPro" + + "to\032$.hadoop.common.ExchangeResponseProto" + + "\022L\n\005sleep\022 .hadoop.common.SleepRequestPr" + + "oto\032!.hadoop.common.EmptyResponseProto\022S" + + "\n\014lockAndSleep\022 .hadoop.common.SleepRequ" + + "estProto\032!.hadoop.common.EmptyResponsePr" + + "oto\022Y\n\rgetAuthMethod\022 .hadoop.common.Emp" + + "tyRequestProto\032&.hadoop.common.AuthMetho" + + "dResponseProto\022Q\n\013getAuthUser\022 .hadoop.c" + + "ommon.EmptyRequestProto\032 .hadoop.common.", + "UserResponseProto\022R\n\rechoPostponed\022\037.had" + + "oop.common.EchoRequestProto\032 .hadoop.com" + + "mon.EchoResponseProto\022T\n\rsendPostponed\022 " + + ".hadoop.common.EmptyRequestProto\032!.hadoo" + + "p.common.EmptyResponseProto\022T\n\016getCurren" + + "tUser\022 .hadoop.common.EmptyRequestProto\032" + + " .hadoop.common.UserResponseProto\022Y\n\023get" + + "ServerRemoteUser\022 .hadoop.common.EmptyRe" + + "questProto\032 .hadoop.common.UserResponseP" + + "roto2\377\001\n\025TestProtobufRpc2Proto\022L\n\005ping2\022", + " .hadoop.common.EmptyRequestProto\032!.hado" + + "op.common.EmptyResponseProto\022J\n\005echo2\022\037." + + "hadoop.common.EchoRequestProto\032 .hadoop." 
+ + "common.EchoResponseProto\022L\n\005sleep\022 .hado" + + "op.common.SleepRequestProto\032!.hadoop.com" + + "mon.SleepResponseProto2\257\001\n\023OldProtobufRp" + + "cProto\022K\n\004ping\022 .hadoop.common.EmptyRequ" + + "estProto\032!.hadoop.common.EmptyResponsePr" + + "oto\022K\n\004echo\022 .hadoop.common.EmptyRequest" + + "Proto\032!.hadoop.common.EmptyResponseProto", + "2\253\001\n\023NewProtobufRpcProto\022K\n\004ping\022 .hadoo" + + "p.common.EmptyRequestProto\032!.hadoop.comm" + + "on.EmptyResponseProto\022G\n\004echo\022\036.hadoop.c" + + "ommon.OptRequestProto\032\037.hadoop.common.Op" + + "tResponseProto2\261\001\n\025NewerProtobufRpcProto" + + "\022K\n\004ping\022 .hadoop.common.EmptyRequestPro" + + "to\032!.hadoop.common.EmptyResponseProto\022K\n" + + "\004echo\022 .hadoop.common.EmptyRequestProto\032" + + "!.hadoop.common.EmptyResponseProto2Z\n\013Cu" + + "stomProto\022K\n\004ping\022 .hadoop.common.EmptyR", + "equestProto\032!.hadoop.common.EmptyRespons" + + "eProto2m\n\033TestProtobufRpcHandoffProto\022N\n" + + "\005sleep\022!.hadoop.common.SleepRequestProto" + + "2\032\".hadoop.common.SleepResponseProto2BB\n" + + "\036org.apache.hadoop.ipc.protobufB\032TestRpc" + + "ServiceProtosLegacy\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.ipc.protobuf.TestProtosLegacy.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index 06c3646310412..0740f056c8fc9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -17,12 +17,10 @@ */ package org.apache.hadoop.ipc; -import org.apache.hadoop.thirdparty.protobuf.BlockingService; -import org.apache.hadoop.thirdparty.protobuf.RpcController; -import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ipc.RPC.RpcKind; import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.ipc.protobuf.TestProtos; @@ -30,38 +28,71 @@ import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto; +import org.apache.hadoop.ipc.protobuf.TestProtosLegacy; import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpc2Proto; import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto; +import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtosLegacy; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import 
org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.thirdparty.protobuf.RpcController; +import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; import java.io.IOException; import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collection; import java.util.concurrent.TimeoutException; -import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; /** * Test for testing protocol buffer based RPC mechanism. * This test depends on test.proto definition of types in src/test/proto * and protobuf service definition from src/test/test_rpc_service.proto */ +@RunWith(Parameterized.class) public class TestProtoBufRpc extends TestRpcBase { private static RPC.Server server; private final static int SLEEP_DURATION = 1000; + /** + * Test with legacy protobuf implementation in same server. + */ + private boolean testWithLegacy; + /** + * Test with legacy protobuf implementation loaded first while creating the + * RPC server. + */ + private boolean testWithLegacyFirst; + + public TestProtoBufRpc(Boolean testWithLegacy, Boolean testWithLegacyFirst) { + this.testWithLegacy = testWithLegacy; + this.testWithLegacyFirst = testWithLegacyFirst; + } + @ProtocolInfo(protocolName = "testProto2", protocolVersion = 1) public interface TestRpcService2 extends TestProtobufRpc2Proto.BlockingInterface { } + @ProtocolInfo(protocolName="testProtoLegacy", protocolVersion = 1) + public interface TestRpcService2Legacy + extends TestRpcServiceProtosLegacy. 
+ TestProtobufRpc2Proto.BlockingInterface { + } + public static class PBServer2Impl implements TestRpcService2 { @Override @@ -88,12 +119,58 @@ public TestProtos.SleepResponseProto sleep(RpcController controller, } } + public static class PBServer2ImplLegacy implements TestRpcService2Legacy { + + @Override + public TestProtosLegacy.EmptyResponseProto ping2( + com.google.protobuf.RpcController unused, + TestProtosLegacy.EmptyRequestProto request) + throws com.google.protobuf.ServiceException { + return TestProtosLegacy.EmptyResponseProto.newBuilder().build(); + } + + @Override + public TestProtosLegacy.EchoResponseProto echo2( + com.google.protobuf.RpcController unused, + TestProtosLegacy.EchoRequestProto request) + throws com.google.protobuf.ServiceException { + return TestProtosLegacy.EchoResponseProto.newBuilder() + .setMessage(request.getMessage()).build(); + } + + @Override + public TestProtosLegacy.SleepResponseProto sleep( + com.google.protobuf.RpcController controller, + TestProtosLegacy.SleepRequestProto request) + throws com.google.protobuf.ServiceException { + try { + Thread.sleep(request.getMilliSeconds()); + } catch (InterruptedException ex) { + } + return TestProtosLegacy.SleepResponseProto.newBuilder().build(); + } + } + + @Parameters + public static Collection params() { + Collection params = new ArrayList(); + params.add(new Object[] {Boolean.TRUE, Boolean.TRUE }); + params.add(new Object[] {Boolean.TRUE, Boolean.FALSE }); + params.add(new Object[] {Boolean.FALSE, Boolean.FALSE }); + return params; + } + @Before + @SuppressWarnings("deprecation") public void setUp() throws IOException { // Setup server for both protocols conf = new Configuration(); conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true); // Set RPC engine to protobuf RPC engine + if (testWithLegacy) { + RPC.setProtocolEngine(conf, TestRpcService2Legacy.class, + ProtobufRpcEngine.class); + } RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, TestRpcService2.class, ProtobufRpcEngine2.class); @@ -103,9 +180,21 @@ public void setUp() throws IOException { // Setup server for both protocols BlockingService service = TestProtobufRpcProto .newReflectiveBlockingService(serverImpl); - // Get RPC server for server side implementation - server = new RPC.Builder(conf).setProtocol(TestRpcService.class) - .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build(); + if (testWithLegacy && testWithLegacyFirst) { + PBServer2ImplLegacy server2ImplLegacy = new PBServer2ImplLegacy(); + com.google.protobuf.BlockingService legacyService = + TestRpcServiceProtosLegacy.TestProtobufRpc2Proto + .newReflectiveBlockingService(server2ImplLegacy); + server = new RPC.Builder(conf).setProtocol(TestRpcService2Legacy.class) + .setInstance(legacyService).setBindAddress(ADDRESS).setPort(PORT) + .build(); + server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class, + service); + } else { + // Get RPC server for server side implementation + server = new RPC.Builder(conf).setProtocol(TestRpcService.class) + .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build(); + } addr = NetUtils.getConnectAddress(server); // now the second protocol @@ -115,6 +204,16 @@ public void setUp() throws IOException { // Setup server for both protocols server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class, service2); + + if (testWithLegacy && !testWithLegacyFirst) { + 
PBServer2ImplLegacy server2ImplLegacy = new PBServer2ImplLegacy(); + com.google.protobuf.BlockingService legacyService = + TestRpcServiceProtosLegacy.TestProtobufRpc2Proto + .newReflectiveBlockingService(server2ImplLegacy); + server + .addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2Legacy.class, + legacyService); + } server.start(); } @@ -128,6 +227,10 @@ private TestRpcService2 getClient2() throws IOException { return RPC.getProxy(TestRpcService2.class, 0, addr, conf); } + private TestRpcService2Legacy getClientLegacy() throws IOException { + return RPC.getProxy(TestRpcService2Legacy.class, 0, addr, conf); + } + @Test (timeout=5000) public void testProtoBufRpc() throws Exception { TestRpcService client = getClient(addr, conf); @@ -179,10 +282,39 @@ public void testProtoBufRpc2() throws Exception { MetricsRecordBuilder rpcDetailedMetrics = getMetrics(server.getRpcDetailedMetrics().name()); assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics); + + if (testWithLegacy) { + testProtobufLegacy(); + } + } + + private void testProtobufLegacy() + throws IOException, com.google.protobuf.ServiceException { + TestRpcService2Legacy client = getClientLegacy(); + + // Test ping method + client.ping2(null, TestProtosLegacy.EmptyRequestProto.newBuilder().build()); + + // Test echo method + TestProtosLegacy.EchoResponseProto echoResponse = client.echo2(null, + TestProtosLegacy.EchoRequestProto.newBuilder().setMessage("hello") + .build()); + assertThat(echoResponse.getMessage()).isEqualTo("hello"); + + // Ensure RPC metrics are updated + MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name()); + assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics); + assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics); + + MetricsRecordBuilder rpcDetailedMetrics = + getMetrics(server.getRpcDetailedMetrics().name()); + assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics); } @Test (timeout=5000) public void testProtoBufRandomException() throws Exception { + //No test with legacy + assumeFalse(testWithLegacy); TestRpcService client = getClient(addr, conf); try { @@ -200,6 +332,8 @@ public void testProtoBufRandomException() throws Exception { @Test(timeout=6000) public void testExtraLongRpc() throws Exception { + //No test with legacy + assumeFalse(testWithLegacy); TestRpcService2 client = getClient2(); final String shortString = StringUtils.repeat("X", 4); // short message goes through @@ -219,6 +353,8 @@ public void testExtraLongRpc() throws Exception { @Test(timeout = 12000) public void testLogSlowRPC() throws IOException, ServiceException, TimeoutException, InterruptedException { + //No test with legacy + assumeFalse(testWithLegacy); TestRpcService2 client = getClient2(); // make 10 K fast calls for (int x = 0; x < 10000; x++) { @@ -244,6 +380,8 @@ public void testLogSlowRPC() throws IOException, ServiceException, @Test(timeout = 12000) public void testEnsureNoLogIfDisabled() throws IOException, ServiceException { + //No test with legacy + assumeFalse(testWithLegacy); // disable slow RPC logging server.setLogSlowRPC(false); TestRpcService2 client = getClient2(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java index 922e9192c41c6..4328655270921 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java @@ -145,7 +145,7 @@ public static class TestProtoBufRpcServerHandoffServer ServiceException { final long startTime = System.currentTimeMillis(); final ProtobufRpcEngineCallback2 callback = - ProtobufRpcEngine2.Server.registerForDeferredResponse(); + ProtobufRpcEngine2.Server.registerForDeferredResponse2(); final long sleepTime = request.getSleepTime(); new Thread() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/proto/test_legacy.proto b/hadoop-common-project/hadoop-common/src/test/proto/test_legacy.proto new file mode 100644 index 0000000000000..7d585e30c2bb7 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/proto/test_legacy.proto @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +option java_package = "org.apache.hadoop.ipc.protobuf"; +option java_outer_classname = "TestProtosLegacy"; +option java_generate_equals_and_hash = true; +package hadoop.common; + +message EmptyRequestProto { +} + +message EmptyResponseProto { +} + +message EchoRequestProto { + required string message = 1; +} + +message EchoResponseProto { + required string message = 1; +} + +message OptRequestProto { + optional string message = 1; +} + +message OptResponseProto { + optional string message = 1; +} + +message SleepRequestProto{ + required int32 milliSeconds = 1; +} + +message SleepResponseProto{ +} + +message SlowPingRequestProto { + required bool shouldSlow = 1; +} + +message EchoRequestProto2 { + repeated string message = 1; +} + +message EchoResponseProto2 { + repeated string message = 1; +} + +message AddRequestProto { + required int32 param1 = 1; + required int32 param2 = 2; +} + +message AddRequestProto2 { + repeated int32 params = 1; +} + +message AddResponseProto { + required int32 result = 1; +} + +message ExchangeRequestProto { + repeated int32 values = 1; +} + +message ExchangeResponseProto { + repeated int32 values = 1; +} + +message AuthMethodResponseProto { + required int32 code = 1; + required string mechanismName = 2; +} + +message UserResponseProto { + required string user = 1; +} + +message SleepRequestProto2 { + optional int64 sleep_time = 1; +} + +message SleepResponseProto2 { + optional int64 receive_time = 1; + optional int64 response_time = 2; +} diff --git a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service_legacy.proto b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service_legacy.proto new file mode 100644 index 0000000000000..95fd6bbe59352 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service_legacy.proto @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +option java_package = "org.apache.hadoop.ipc.protobuf"; +option java_outer_classname = "TestRpcServiceProtosLegacy"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.common; + +import "test_legacy.proto"; + + +/** + * A protobuf service for use in tests + */ +service TestProtobufRpcProto { + rpc ping(EmptyRequestProto) returns (EmptyResponseProto); + rpc echo(EchoRequestProto) returns (EchoResponseProto); + rpc error(EmptyRequestProto) returns (EmptyResponseProto); + rpc error2(EmptyRequestProto) returns (EmptyResponseProto); + rpc slowPing(SlowPingRequestProto) returns (EmptyResponseProto); + rpc echo2(EchoRequestProto2) returns (EchoResponseProto2); + rpc add(AddRequestProto) returns (AddResponseProto); + rpc add2(AddRequestProto2) returns (AddResponseProto); + rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto); + rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto); + rpc sleep(SleepRequestProto) returns (EmptyResponseProto); + rpc lockAndSleep(SleepRequestProto) returns (EmptyResponseProto); + rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto); + rpc getAuthUser(EmptyRequestProto) returns (UserResponseProto); + rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto); + rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto); + rpc getCurrentUser(EmptyRequestProto) returns (UserResponseProto); + rpc getServerRemoteUser(EmptyRequestProto) returns (UserResponseProto); +} + +service TestProtobufRpc2Proto { + rpc ping2(EmptyRequestProto) returns (EmptyResponseProto); + rpc echo2(EchoRequestProto) returns (EchoResponseProto); + rpc sleep(SleepRequestProto) returns (SleepResponseProto); +} + +service OldProtobufRpcProto { + rpc ping(EmptyRequestProto) returns (EmptyResponseProto); + rpc echo(EmptyRequestProto) returns (EmptyResponseProto); +} + +service NewProtobufRpcProto { + rpc ping(EmptyRequestProto) returns (EmptyResponseProto); + rpc echo(OptRequestProto) returns (OptResponseProto); +} + +service NewerProtobufRpcProto { + rpc ping(EmptyRequestProto) returns (EmptyResponseProto); + rpc echo(EmptyRequestProto) returns (EmptyResponseProto); +} + +service CustomProto { + rpc ping(EmptyRequestProto) returns (EmptyResponseProto); +} + +service TestProtobufRpcHandoffProto { + rpc sleep(SleepRequestProto2) returns (SleepResponseProto2); +} From a534fad32105e071dbbdbf9bd5e60f5dc5d2b527 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 24 May 2021 03:19:58 -0700 Subject: [PATCH 344/666] HADOOP-17723. 
[build] fix the Dockerfile for ARM (#3037) Reviewed-by: Akira Ajisaka --- dev-support/docker/Dockerfile_aarch64 | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 710c989c7b624..5c2613268a65b 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -73,8 +73,10 @@ RUN apt-get -q update \ npm \ openjdk-11-jdk \ openjdk-8-jdk \ + phantomjs \ pinentry-curses \ pkg-config \ + python2.7 \ python3 \ python3-pip \ python3-pkg-resources \ @@ -154,17 +156,6 @@ RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 # hadolint ignore=DL3008 RUN npm install -g bower@1.8.8 -### -# Install phantomjs built for aarch64 -#### -RUN mkdir -p /opt/phantomjs \ - && curl -L -s -S \ - https://github.com/liusheng/phantomjs/releases/download/2.1.1/phantomjs-2.1.1-linux-aarch64.tar.bz2 \ - -o /opt/phantomjs/phantomjs-2.1.1-linux-aarch64.tar.bz2 \ - && tar xvjf /opt/phantomjs/phantomjs-2.1.1-linux-aarch64.tar.bz2 --strip-components 1 -C /opt/phantomjs \ - && cp /opt/phantomjs/bin/phantomjs /usr/bin/ \ - && rm -rf /opt/phantomjs - ### # Avoid out of memory errors in builds ### @@ -173,6 +164,9 @@ ENV MAVEN_OPTS -Xms256m -Xmx1536m # Skip gpg verification when downloading Yetus via yetus-wrapper ENV HADOOP_SKIP_YETUS_VERIFICATION true +# Force PhantomJS to be in 'headless' mode, do not connect to Xwindow +ENV QT_QPA_PLATFORM offscreen + ### # Everything past this point is either not needed for testing or breaks Yetus. # So tell Yetus not to read the rest of the file: From 0e2924591a6d448f0e95f1363ae737a4a7dde258 Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Mon, 24 May 2021 17:32:11 +0530 Subject: [PATCH 345/666] HADOOP-17670. S3AFS and ABFS to log IOStats at DEBUG mode or optionally at INFO level in close() (#2963) When the S3A and ABFS filesystems are closed, their IOStatistics are logged at debug in the log: org.apache.hadoop.fs.statistics.IOStatisticsLogging Set `fs.iostatistics.logging.level` to `info` for the statistics to be logged at info. (also: `warn` or `error` for even higher log levels). Contributed by: Mehakmeet Singh --- .../hadoop/fs/CommonConfigurationKeys.java | 24 ++++++++++++++ .../fs/statistics/IOStatisticsLogging.java | 31 +++++++++++++++++++ .../apache/hadoop/fs/s3a/S3AFileSystem.java | 9 ++++++ .../fs/azurebfs/AzureBlobFileSystem.java | 14 ++++++--- .../azurebfs/services/AbfsOutputStream.java | 4 +-- .../fs/azurebfs/ITestAbfsStatistics.java | 12 +++++++ 6 files changed, 87 insertions(+), 7 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 07776763e97a4..b5dc29cea932d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -438,4 +438,28 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { "hadoop.metrics.jvm.use-thread-mxbean"; public static final boolean HADOOP_METRICS_JVM_USE_THREAD_MXBEAN_DEFAULT = false; + + /** logging level for IOStatistics (debug or info). */ + public static final String IOSTATISTICS_LOGGING_LEVEL + = "fs.iostatistics.logging.level"; + + /** DEBUG logging level for IOStatistics logging. 
*/ + public static final String IOSTATISTICS_LOGGING_LEVEL_DEBUG + = "debug"; + + /** WARN logging level for IOStatistics logging. */ + public static final String IOSTATISTICS_LOGGING_LEVEL_WARN + = "warn"; + + /** ERROR logging level for IOStatistics logging. */ + public static final String IOSTATISTICS_LOGGING_LEVEL_ERROR + = "error"; + + /** INFO logging level for IOStatistics logging. */ + public static final String IOSTATISTICS_LOGGING_LEVEL_INFO + = "info"; + + /** Default value for IOStatistics logging level. */ + public static final String IOSTATISTICS_LOGGING_LEVEL_DEFAULT + = IOSTATISTICS_LOGGING_LEVEL_DEBUG; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java index c7230e25c3434..df063f1fa832b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.statistics; import javax.annotation.Nullable; +import java.util.Locale; import java.util.Map; import java.util.TreeMap; import java.util.function.Predicate; @@ -30,6 +31,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_ERROR; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_INFO; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_WARN; import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; /** @@ -249,6 +253,33 @@ public static void logIOStatisticsAtDebug( logIOStatisticsAtDebug(LOG, message, source); } + /** + * A method to log IOStatistics from a source at different levels. + * + * @param log Logger for logging. + * @param level LOG level. + * @param source Source to LOG. + */ + public static void logIOStatisticsAtLevel(Logger log, String level, + Object source) { + IOStatistics stats = retrieveIOStatistics(source); + if (stats != null) { + switch (level.toLowerCase(Locale.US)) { + case IOSTATISTICS_LOGGING_LEVEL_INFO: + LOG.info("IOStatistics: {}", ioStatisticsToPrettyString(stats)); + break; + case IOSTATISTICS_LOGGING_LEVEL_ERROR: + LOG.error("IOStatistics: {}", ioStatisticsToPrettyString(stats)); + break; + case IOSTATISTICS_LOGGING_LEVEL_WARN: + LOG.warn("IOStatistics: {}", ioStatisticsToPrettyString(stats)); + break; + default: + logIOStatisticsAtDebug(log, "IOStatistics: {}", source); + } + } + } + /** * On demand stringifier. *
    diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 7f0160a5070f6..751038bb6d3c3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -177,6 +177,8 @@ import org.apache.hadoop.util.concurrent.HadoopExecutors; import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_DEFAULT; import static org.apache.hadoop.fs.impl.AbstractFSBuilderImpl.rejectUnknownMandatoryKeys; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; import static org.apache.hadoop.fs.s3a.Constants.*; @@ -199,6 +201,7 @@ import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.logDnsLookup; import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.dirMetaToStatuses; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.logIOStatisticsAtLevel; import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST; import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.pairedTrackerFactory; @@ -3538,6 +3541,12 @@ public void close() throws IOException { } isClosed = true; LOG.debug("Filesystem {} is closed", uri); + if (getConf() != null) { + String iostatisticsLoggingLevel = + getConf().getTrimmed(IOSTATISTICS_LOGGING_LEVEL, + IOSTATISTICS_LOGGING_LEVEL_DEFAULT); + logIOStatisticsAtLevel(LOG, iostatisticsLoggingLevel, getIOStatistics()); + } try { super.close(); } finally { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 2a8d58678a203..b1d2a333bdf9d 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -41,6 +41,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; @@ -84,7 +85,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.IOStatisticsSource; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.UserGroupInformation; @@ -93,9 +93,11 @@ import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL_DEFAULT; import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.*; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; -import static 
org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.logIOStatisticsAtLevel; /** * A {@link org.apache.hadoop.fs.FileSystem} for reading and writing files stored on Date: Mon, 24 May 2021 17:38:45 +0530 Subject: [PATCH 346/666] HADOOP-17705. S3A to add Config to set AWS region (#3020) The option `fs.s3a.endpoint.region` can be used to explicitly set the AWS region of a bucket. This is needed when using AWS Private Link, as the region cannot be automatically determined. Contributed by Mehakmeet Singh --- .../org/apache/hadoop/fs/s3a/Constants.java | 6 ++ .../hadoop/fs/s3a/DefaultS3ClientFactory.java | 31 ++++--- .../site/markdown/tools/hadoop-aws/index.md | 8 ++ .../tools/hadoop-aws/troubleshooting_s3a.md | 26 ++++++ .../hadoop/fs/s3a/ITestS3AEndpointRegion.java | 91 +++++++++++++++++++ .../fs/s3a/impl/TestNetworkBinding.java | 2 +- 6 files changed, 150 insertions(+), 14 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index f6900cb1c8e19..8dc6bba1ccef5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -1081,4 +1081,10 @@ private Constants() { */ public static final String XA_HEADER_PREFIX = "header."; + /** + * AWS S3 region for the bucket. When set bypasses the construction of + * region through endpoint url. + */ + public static final String AWS_REGION = "fs.s3a.endpoint.region"; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java index ae50bd1459bcd..6e84497cb99d0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java @@ -42,6 +42,7 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector; +import static org.apache.hadoop.fs.s3a.Constants.AWS_REGION; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT; @@ -132,7 +133,7 @@ protected AmazonS3 buildAmazonS3Client( // endpoint set up is a PITA AwsClientBuilder.EndpointConfiguration epr = createEndpointConfiguration(parameters.getEndpoint(), - awsConf); + awsConf, getConf().getTrimmed(AWS_REGION)); if (epr != null) { // an endpoint binding was constructed: use it. b.withEndpointConfiguration(epr); @@ -197,12 +198,14 @@ protected static AmazonS3 configureAmazonS3Client(AmazonS3 s3, * * @param endpoint possibly null endpoint. * @param awsConf config to build the URI from. + * @param awsRegion AWS S3 Region if the corresponding config is set. * @return a configuration for the S3 client builder. 
*/ @VisibleForTesting public static AwsClientBuilder.EndpointConfiguration createEndpointConfiguration( - final String endpoint, final ClientConfiguration awsConf) { + final String endpoint, final ClientConfiguration awsConf, + String awsRegion) { LOG.debug("Creating endpoint configuration for {}", endpoint); if (endpoint == null || endpoint.isEmpty()) { // the default endpoint...we should be using null at this point. @@ -212,17 +215,19 @@ protected static AmazonS3 configureAmazonS3Client(AmazonS3 s3, final URI epr = RuntimeHttpUtils.toUri(endpoint, awsConf); LOG.debug("Endpoint URI = {}", epr); - - String region; - if (!ServiceUtils.isS3USStandardEndpoint(endpoint)) { - LOG.debug("Endpoint {} is not the default; parsing", epr); - region = AwsHostNameUtils.parseRegion( - epr.getHost(), - S3_SERVICE_NAME); - } else { - // US-east, set region == null. - LOG.debug("Endpoint {} is the standard one; declare region as null", epr); - region = null; + String region = awsRegion; + if (StringUtils.isBlank(region)) { + if (!ServiceUtils.isS3USStandardEndpoint(endpoint)) { + LOG.debug("Endpoint {} is not the default; parsing", epr); + region = AwsHostNameUtils.parseRegion( + epr.getHost(), + S3_SERVICE_NAME); + } else { + // US-east, set region == null. + LOG.debug("Endpoint {} is the standard one; declare region as null", + epr); + region = null; + } } LOG.debug("Region for endpoint {}, URI {} is determined as {}", endpoint, epr, region); diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index 9258c47c0d81a..c68c57d95617f 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -797,6 +797,14 @@ options are covered in [Testing](./testing.md). + + fs.s3a.endpoint.region + AWS S3 region for a bucket, which bypasses the parsing of + fs.s3a.endpoint to know the region. Would be helpful in avoiding errors + while using privateLink URL and explicitly set the bucket region. + + + fs.s3a.path.style.access false diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 661dd2f36ad2b..d91607d3fd19d 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -247,6 +247,32 @@ As an example, the endpoint for S3 Frankfurt is `s3.eu-central-1.amazonaws.com`: s3.eu-central-1.amazonaws.com ``` +### "Authorization Header is Malformed"(400) exception when PrivateLink URL is used in "fs.s3a.endpoint" + +When [PrivateLink](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) URL +is used instead of standard s3a endpoint, it returns "authorization +header is malformed" exception. 
So, if we set fs.s3a.endpoint=bucket.vpce +-.s3.ca-central-1.vpce.amazonaws.com and make s3 calls we get: +``` +com.amazonaws.services.s3.model.AmazonS3Exception: The authorization header is malformed; the region 'vpce' is wrong; expecting 'ca-central-1' +(Service: Amazon S3; Status Code: 400; Error Code: AuthorizationHeaderMalformed; Request ID: req-id; S3 Extended Request ID: req-id-2), S3 Extended Request ID: req-id-2:AuthorizationHeaderMalformed: The authorization +header is malformed; the region 'vpce' is wrong; expecting 'ca-central-1' (Service: Amazon S3; Status Code: 400; Error Code: AuthorizationHeaderMalformed; Request ID: req-id; +``` +Cause: + +Since, endpoint parsing is done in a way that it assumes the AWS S3 region +would be the 2nd component of the `fs.s3a.endpoint` URL delimited by ".", in +case of PrivateLink URL, it can't figure out the region and throws an +authorization exception. Thus, to add support to using PrivateLink URLs we use `fs.s3a.endpoint.region` +to set the region and bypass this parsing of `fs.s3a.endpoint`, in the case shown above to make it work we'll set the AWS +S3 region as `ca-central-1`. + +```xml + + fs.s3a.endpoint.region + ca-central-1 + +``` ### `Class does not implement AWSCredentialsProvider` diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java new file mode 100644 index 0000000000000..abd637a5240cb --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.util.AwsHostNameUtils; +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import static org.apache.hadoop.fs.s3a.Constants.AWS_REGION; + +/** + * Test to check correctness of S3A endpoint regions in + * {@link DefaultS3ClientFactory}. + */ +public class ITestS3AEndpointRegion extends AbstractS3ATestBase { + + private static final String AWS_REGION_TEST = "test-region"; + private static final String AWS_ENDPOINT_TEST = "test-endpoint"; + private static final String AWS_ENDPOINT_TEST_WITH_REGION = + "test-endpoint.some-region.amazonaws.com"; + + /** + * Test to verify that setting a region with the config would bypass the + * construction of region from endpoint. + */ + @Test + public void testWithRegionConfig() { + getFileSystem().getConf().set(AWS_REGION, AWS_REGION_TEST); + + //Creating an endpoint config with a custom endpoint. 
+ AwsClientBuilder.EndpointConfiguration epr = createEpr(AWS_ENDPOINT_TEST, + getFileSystem().getConf().getTrimmed(AWS_REGION)); + //Checking if setting region config bypasses the endpoint region. + Assertions.assertThat(epr.getSigningRegion()) + .describedAs("There is a region mismatch") + .isEqualTo(getFileSystem().getConf().get(AWS_REGION)); + } + + /** + * Test to verify that not setting the region config, would lead to using + * endpoint to construct the region. + */ + @Test + public void testWithoutRegionConfig() { + getFileSystem().getConf().unset(AWS_REGION); + + //Creating an endpoint config with a custom endpoint containing a region. + AwsClientBuilder.EndpointConfiguration eprRandom = + createEpr(AWS_ENDPOINT_TEST_WITH_REGION, + getFileSystem().getConf().getTrimmed(AWS_REGION)); + String regionFromEndpoint = + AwsHostNameUtils + .parseRegionFromAwsPartitionPattern(AWS_ENDPOINT_TEST_WITH_REGION); + //Checking if not setting region config leads to constructing the region + // from endpoint. + Assertions.assertThat(eprRandom.getSigningRegion()) + .describedAs("There is a region mismatch") + .isNotEqualTo(getFileSystem().getConf().get(AWS_REGION)) + .isEqualTo(regionFromEndpoint); + } + + /** + * Method to create EndpointConfiguration using an endpoint. + * + * @param endpoint the endpoint to be used for EndpointConfiguration creation. + * @return an instance of EndpointConfiguration. + */ + private AwsClientBuilder.EndpointConfiguration createEpr(String endpoint, + String awsRegion) { + return DefaultS3ClientFactory.createEndpointConfiguration(endpoint, + new ClientConfiguration(), awsRegion); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java index 10fe339174fc5..7f51d2b45362c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java @@ -85,7 +85,7 @@ public void expectEndpoint(final String src, final boolean expectNull, final String expectRegion) { AwsClientBuilder.EndpointConfiguration epr = - createEndpointConfiguration(src, new ClientConfiguration()); + createEndpointConfiguration(src, new ClientConfiguration(), src); String eprStr = epr == null ? "(empty)" : ("(" + epr.getServiceEndpoint() + " " + epr.getSigningRegion()); From 524bbe1bd8785bb15086c1d6f4ec20f7f4f296da Mon Sep 17 00:00:00 2001 From: zhuqi-lucas <821684824@qq.com> Date: Mon, 24 May 2021 23:12:07 +0800 Subject: [PATCH 347/666] YARN-10771. Add cluster metric for size of SchedulerEventQueue and RMEventQueue. Contributed by chaosju. 
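The change below follows a simple pattern: a daemon thread wakes up once a second, reads the size of each dispatcher's event queue, and publishes the value as a cluster metric gauge. A minimal, self-contained sketch of that sampling loop is given here for reference; it uses only JDK classes, and the `QueueDepthSampler` class with its plain `AtomicInteger` gauge is an illustrative stand-in, not the actual AsyncDispatcher/ClusterMetrics wiring added by the patch.

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Illustrative sketch: periodically sample an event queue's depth into a
 * gauge, the way the patch samples the RM and scheduler dispatcher queues
 * into ClusterMetrics once per second.
 */
public class QueueDepthSampler {
  private final BlockingQueue<Runnable> eventQueue = new ArrayBlockingQueue<>(10_000);
  // Stand-in for a metrics gauge such as MutableGaugeInt.
  private final AtomicInteger queueSizeGauge = new AtomicInteger();
  private final ScheduledExecutorService sampler =
      Executors.newSingleThreadScheduledExecutor(r -> {
        Thread t = new Thread(r, "EventQueueSizeMetricThread");
        t.setDaemon(true); // do not block JVM shutdown
        return t;
      });

  public void start() {
    // Sample once per second, mirroring the 1s cadence used in the patch.
    sampler.scheduleAtFixedRate(
        () -> queueSizeGauge.set(eventQueue.size()), 1, 1, TimeUnit.SECONDS);
  }

  public void stop() {
    sampler.shutdownNow();
  }

  public int currentGaugeValue() {
    return queueSizeGauge.get();
  }

  public static void main(String[] args) throws InterruptedException {
    QueueDepthSampler s = new QueueDepthSampler();
    s.start();
    for (int i = 0; i < 42; i++) {
      s.eventQueue.add(() -> { }); // simulate queued events
    }
    Thread.sleep(1500);            // wait for at least one sample
    System.out.println("sampled queue depth: " + s.currentGaugeValue());
    s.stop();
  }
}
```

The one-second schedule and the `shutdownNow()` in `stop()` mirror the `scheduleAtFixedRate(..., 1, 1, TimeUnit.SECONDS)` call and the `serviceStop()` cleanup that the diff below adds to the ResourceManager.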
--- .../hadoop/yarn/event/AsyncDispatcher.java | 4 ++++ .../hadoop/yarn/event/EventDispatcher.java | 4 ++++ .../resourcemanager/ClusterMetrics.java | 20 ++++++++++++++++ .../resourcemanager/ResourceManager.java | 24 +++++++++++++++++++ .../webapp/MetricsOverviewTable.java | 8 +++++++ .../webapp/dao/ClusterMetricsInfo.java | 13 ++++++++++ .../resourcemanager/webapp/TestNodesPage.java | 2 +- .../webapp/TestRMWebServices.java | 2 +- 8 files changed, 75 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index ba6bb435ec25b..0915eb4a981fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -409,4 +409,8 @@ public void addMetrics(EventTypeMetrics metrics, Class eventClass) { eventTypeMetricsMap.put(eventClass, metrics); } + + public int getEventQueueSize() { + return eventQueue.size(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index 11cdf150ddb74..6731bdacac880 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -161,4 +161,8 @@ protected long getEventProcessorId() { protected boolean isStopped() { return this.stopped; } + + public int getEventQueueSize() { + return eventQueue.size(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java index 67a3a620875d7..fa3c5c90949cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java @@ -72,6 +72,10 @@ public class ClusterMetrics { rmEventProcCPUMax; @Metric("# of Containers assigned in the last second") MutableGaugeInt containerAssignedPerSecond; + @Metric("# of rm dispatcher event queue size") + MutableGaugeInt rmDispatcherEventQueueSize; + @Metric("# of scheduler dispatcher event queue size") + MutableGaugeInt schedulerDispatcherEventQueueSize; private boolean rmEventProcMonitorEnable = false; @@ -356,4 +360,20 @@ public void incrNumContainerAssigned() { private ScheduledThreadPoolExecutor getAssignCounterExecutor(){ return assignCounterExecutor; } + + public int getRmEventQueueSize() { + return rmDispatcherEventQueueSize.value(); + } + + public void setRmEventQueueSize(int rmEventQueueSize) { + this.rmDispatcherEventQueueSize.set(rmEventQueueSize); + } + + public int getSchedulerEventQueueSize() { + return schedulerDispatcherEventQueueSize.value(); 
+ } + + public void setSchedulerEventQueueSize(int schedulerEventQueueSize) { + this.schedulerDispatcherEventQueueSize.set(schedulerEventQueueSize); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index b0dc218f3dd15..a813a8524e60b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.spi.container.servlet.ServletContainer; @@ -152,6 +153,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; /** @@ -715,6 +718,7 @@ public class RMActiveServices extends CompositeService { private boolean fromActive = false; private StandByTransitionRunnable standByTransitionRunnable; private RMNMInfo rmnmInfo; + private ScheduledThreadPoolExecutor eventQueueMetricExecutor; RMActiveServices(ResourceManager rm) { super("RMActiveServices"); @@ -937,6 +941,23 @@ protected void serviceInit(Configuration configuration) throws Exception { addIfService(volumeManager); } + eventQueueMetricExecutor = new ScheduledThreadPoolExecutor(1, + new ThreadFactoryBuilder(). + setDaemon(true).setNameFormat("EventQueueSizeMetricThread"). + build()); + eventQueueMetricExecutor.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + int rmEventQueueSize = ((AsyncDispatcher)getRMContext(). + getDispatcher()).getEventQueueSize(); + ClusterMetrics.getMetrics().setRmEventQueueSize(rmEventQueueSize); + int schedulerEventQueueSize = ((EventDispatcher)schedulerDispatcher). + getEventQueueSize(); + ClusterMetrics.getMetrics(). 
+ setSchedulerEventQueueSize(schedulerEventQueueSize); + } + }, 1, 1, TimeUnit.SECONDS); + super.serviceInit(conf); } @@ -1012,6 +1033,9 @@ protected void serviceStop() throws Exception { LOG.error("Error closing store.", e); } } + if (eventQueueMetricExecutor != null) { + eventQueueMetricExecutor.shutdownNow(); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index 3ce4f2b51859c..c9922964ff980 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -205,6 +205,8 @@ protected void render(Block html) { SchedulerInfo schedulerInfo = new SchedulerInfo(this.rm); int schedBusy = clusterMetrics.getRmSchedulerBusyPercent(); + int rmEventQueueSize = clusterMetrics.getRmEventQueueSize(); + int schedulerEventQueueSize = clusterMetrics.getSchedulerEventQueueSize(); div.h3("Scheduler Metrics"). table("#schedulermetricsoverview"). @@ -217,6 +219,10 @@ protected void render(Block html) { th().$class("ui-state-default") .__("Maximum Cluster Application Priority").__(). th().$class("ui-state-default").__("Scheduler Busy %").__(). + th().$class("ui-state-default") + .__("RM Dispatcher EventQueue Size").__(). + th().$class("ui-state-default") + .__("Scheduler Dispatcher EventQueue Size").__(). __(). __(). tbody().$class("ui-widget-content"). @@ -228,6 +234,8 @@ protected void render(Block html) { td(schedulerInfo.getMaxAllocation().toString()). td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())). td(schedBusy == -1 ? UNAVAILABLE : String.valueOf(schedBusy)). + td(String.valueOf(rmEventQueueSize)). + td(String.valueOf(schedulerEventQueueSize)). __(). 
__().__(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index 7dc2d8ac1ebbf..e188fa0526894 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -83,6 +83,9 @@ public class ClusterMetricsInfo { private boolean crossPartitionMetricsAvailable = false; + private int rmEventQueueSize; + private int schedulerEventQueueSize; + public ClusterMetricsInfo() { } // JAXB needs this @@ -162,6 +165,8 @@ public ClusterMetricsInfo(final ResourceScheduler rs) { + rebootedNodes + unhealthyNodes + decommissioningNodes + shutdownNodes; this.containerAssignedPerSecond = clusterMetrics .getContainerAssignedPerSecond(); + this.rmEventQueueSize = clusterMetrics.getRmEventQueueSize(); + this.schedulerEventQueueSize = clusterMetrics.getSchedulerEventQueueSize(); } public int getAppsSubmitted() { @@ -419,4 +424,12 @@ public boolean getCrossPartitionMetricsAvailable() { public int getContainerAssignedPerSecond() { return this.containerAssignedPerSecond; } + + public int getRmEventQueueSize() { + return rmEventQueueSize; + } + + public int getSchedulerEventQueueSize() { + return schedulerEventQueueSize; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index dd271fd34d798..891c8d6c325bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -52,7 +52,7 @@ public class TestNodesPage { // Number of Actual Table Headers for NodesPage.NodesBlock might change in // future. In that case this value should be adjusted to the new value. 
- private final int numberOfThInMetricsTable = 23; + private final int numberOfThInMetricsTable = 25; private final int numberOfActualTableHeaders = 16; private final int numberOfThForOpportunisticContainers = 4; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index b651c7959a42f..673fbbe2ec079 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -474,7 +474,7 @@ public void verifyClusterMetricsJSON(JSONObject json) throws JSONException, Exception { assertEquals("incorrect number of elements", 1, json.length()); JSONObject clusterinfo = json.getJSONObject("clusterMetrics"); - assertEquals("incorrect number of elements", 33, clusterinfo.length()); + assertEquals("incorrect number of elements", 35, clusterinfo.length()); verifyClusterMetrics( clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"), clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"), From 6847c90d4a2331de3db52d76061e26c5b0176943 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 25 May 2021 06:37:22 +0530 Subject: [PATCH 348/666] HADOOP-17722. Replace Guava Sets usage by Hadoop's own Sets in hadoop-mapreduce-project (#3032) Signed-off-by: Takanobu Asanuma --- .../v2/app/webapp/TestAMWebServices.java | 2 +- .../lib/input/TestFileInputFormat.java | 5 ++- .../v2/hs/webapp/TestHsWebServicesLogs.java | 2 +- .../mapred/TestMRTimelineEventHandling.java | 3 +- .../hadoop-mapreduce-client/pom.xml | 32 +++++++++++++++++++ 5 files changed, 37 insertions(+), 7 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java index 7317baae4a8c1..5def1d91494c4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java @@ -34,6 +34,7 @@ import org.apache.hadoop.http.JettyUtils; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.MockAppContext; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.GuiceServletConfig; import org.apache.hadoop.yarn.webapp.JerseyTestBase; @@ -48,7 +49,6 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java index 2ef6a64df9b8b..52b987aebb1c7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java @@ -25,8 +25,6 @@ import java.util.Set; import java.util.stream.Collectors; -import org.junit.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; @@ -45,7 +43,9 @@ import org.apache.hadoop.mapred.SplitLocationInfo; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.util.Sets; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -55,7 +55,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @RunWith(value = Parameterized.class) public class TestFileInputFormat { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java index 49ff539cdc8b5..051f5c0a0cabd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.mapreduce.v2.hs.webapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; @@ -32,6 +31,7 @@ import org.apache.hadoop.mapreduce.v2.hs.HistoryContext; import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java index 4eb0beaad2a37..32e847886721b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -60,8 +61,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestMRTimelineEventHandling { private static final String TIMELINE_AUX_SERVICE_NAME = "timeline_collector"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 55940ae4ff744..5cf59df19e158 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -177,6 +177,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + From dc6a639ca5c7521be83309a76fd2102649a88b6b Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 25 May 2021 07:25:58 +0530 Subject: [PATCH 349/666] HADOOP-17720. Replace Guava Sets usage by Hadoop's own Sets in HDFS (#3031) Signed-off-by: Takanobu Asanuma --- .../hadoop-hdfs-httpfs/pom.xml | 32 +++++++++++++++++++ .../service/security/DummyGroupMapping.java | 2 +- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 32 +++++++++++++++++++ .../router/TestRouterPolicyProvider.java | 3 +- .../router/TestRouterUserMappings.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 32 +++++++++++++++++++ .../java/org/apache/hadoop/hdfs/DFSUtil.java | 2 +- .../hadoop/hdfs/protocol/LayoutFlags.java | 5 --- .../qjournal/server/JournalNodeSyncer.java | 2 +- .../blockmanagement/DatanodeManager.java | 6 ++-- .../hdfs/server/datanode/BPOfferService.java | 2 +- .../server/datanode/BlockPoolManager.java | 6 ++-- .../checker/DatasetVolumeChecker.java | 2 +- .../fsdataset/impl/FsDatasetImpl.java | 2 +- .../hdfs/server/namenode/JournalSet.java | 3 +- .../namenode/NNStorageRetentionManager.java | 3 +- .../hadoop/hdfs/server/namenode/NameNode.java | 2 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 2 +- .../hadoop/hdfs/TestBlockStoragePolicy.java | 2 +- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 3 +- .../hadoop/hdfs/TestHDFSPolicyProvider.java | 2 +- .../org/apache/hadoop/hdfs/TestHdfsAdmin.java | 3 +- .../hdfs/net/TestDFSNetworkTopology.java | 6 ++-- .../qjournal/client/TestQJMWithFaults.java | 6 ++-- .../server/datanode/TestRefreshNamenodes.java | 2 +- .../hdfs/server/namenode/FSImageTestUtil.java | 6 ++-- .../hadoop/hdfs/server/namenode/TestFsck.java | 5 ++- .../TestNNStorageRetentionManager.java | 10 +++--- .../server/namenode/TestNameNodeRecovery.java | 7 ++-- .../security/TestRefreshUserMappings.java | 2 +- 30 files changed, 140 insertions(+), 54 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 
1916ef0e3b7f6..704bc12a9bcaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -343,6 +343,38 @@ ${basedir}/dev-support/findbugsExcludeFile.xml + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java index 80a94b18d1e51..4af93182ed341 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java @@ -23,9 +23,9 @@ import java.util.List; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.HadoopUsersConfTestHelper; +import org.apache.hadoop.util.Sets; public class DummyGroupMapping implements GroupMappingServiceProvider { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index e17602d1f6466..f3a303d234c7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -305,6 +305,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java index c2577e67a06fa..c31869a97667e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.security.authorize.Service; +import org.apache.hadoop.util.Sets; import org.junit.BeforeClass; import org.junit.Rule; @@ -41,8 +42,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - /** * Test suite covering RouterPolicyProvider. We expect that it contains a * security policy definition for every RPC protocol used in HDFS. 
The test diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java index ba8c4639e4d13..707a2f7baa348 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.federation.router; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; @@ -40,6 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.tools.GetUserMappingsProtocol; +import org.apache.hadoop.util.Sets; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 95ac71bae22c8..97cfdc69d3d24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -437,6 +437,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index efb2a8bcfc6d4..cd15cd06a66c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -107,7 +108,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.thirdparty.protobuf.BlockingService; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java index edc184025cefa..1d14973cfed93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java @@ -23,11 +23,6 @@ import org.apache.hadoop.classification.InterfaceAudience; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - /** * LayoutFlags represent features which the FSImage and edit logs can either * support or not, independently of layout version. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java index b4997eacd0719..8a03fcadbadea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java @@ -19,7 +19,6 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; @@ -40,6 +39,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 8e5a24ba60a26..c8f5179e5c6da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -22,15 +22,12 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_DEFAULT; import static org.apache.hadoop.util.Time.monotonicNow; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; import org.apache.hadoop.fs.StorageType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -59,7 +56,10 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Timer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.annotation.Nonnull; import javax.annotation.Nullable; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 40046b0d8c454..ad6b2d61d2213 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -20,7 +20,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; @@ -33,6 +32,7 @@ import org.apache.hadoop.hdfs.server.protocol.*; import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index 95bc2d6c23856..ef4d292a82e87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -28,12 +28,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; /** @@ -177,8 +177,8 @@ private void doRefreshNamenodes( throws IOException { assert Thread.holdsLock(refreshNamenodesLock); - Set toRefresh = Sets.newLinkedHashSet(); - Set toAdd = Sets.newLinkedHashSet(); + Set toRefresh = new LinkedHashSet<>(); + Set toAdd = new LinkedHashSet<>(); Set toRemove; synchronized (this) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index 997a6d972224c..ff1b0fbe2798c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -20,7 +20,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; @@ -37,6 +36,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.VolumeCheckContext; import org.apache.hadoop.io.IOUtils; import 
org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Timer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 0e7b89a4c4d87..faca370b88fa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -119,12 +119,12 @@ import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.InstrumentedReadWriteLock; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Timer; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index e2b7b1c68bad4..abcab80d8a04f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -40,10 +40,11 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import org.apache.hadoop.util.Sets; + import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Manages a collection of Journals. 
None of the methods are synchronized, it is diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java index 22be54e5576d6..065cf738c6c5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java @@ -40,7 +40,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * The NNStorageRetentionManager is responsible for inspecting the storage @@ -192,7 +191,7 @@ private long getImageTxIdToRetain( return 0L; } - TreeSet imageTxIds = Sets.newTreeSet(Collections.reverseOrder()); + TreeSet imageTxIds = new TreeSet<>(Collections.reverseOrder()); for (FSImageFile image : images) { imageTxIds.add(image.getCheckpointTxId()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 4692229be2f4e..d19e770a98a1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -21,7 +21,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.util.Set; import org.apache.commons.logging.Log; @@ -93,6 +92,7 @@ import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.ServicePlugin; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.GcTimeMonitor; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index aec5afc37b33a..fe80633b295be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.server.namenode.ImageServlet; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -148,7 +149,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * This class creates a single-process DFS cluster for junit testing. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index ef116957fb84d..775f7b377aa42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -25,7 +25,6 @@ import java.util.*; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockStoragePolicySpi; @@ -48,6 +47,7 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Sets; import org.junit.Assert; import static org.junit.Assert.fail; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index f1e69d0cf4af8..9a024c3084586 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -80,12 +80,11 @@ import org.apache.hadoop.security.alias.JavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.Sets; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestDFSUtil { static final String NS1_NN_ADDR = "ns1-nn.example.com:8020"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java index bc763d42fd480..0a74a2d025659 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java @@ -27,12 +27,12 @@ import java.util.List; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.ClassUtils; import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer; import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.security.authorize.Service; +import org.apache.hadoop.util.Sets; import org.junit.BeforeClass; import org.junit.Rule; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java index 2fabbc50a2f10..a7afa66f22ae0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java @@ -45,13 +45,12 @@ import org.apache.hadoop.hdfs.protocol.OpenFilesIterator; import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; +import org.apache.hadoop.util.Sets; import org.junit.After; 
import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestHdfsAdmin { private static final Path TEST_PATH = new Path("/test"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java index 7afa057bebb13..1d0024d44472e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java @@ -17,10 +17,7 @@ */ package org.apache.hadoop.hdfs.net; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -29,10 +26,13 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.net.Node; +import org.apache.hadoop.util.Sets; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.EnumMap; import java.util.HashMap; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index ceca1ead10d81..6cdbe2d31d9f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -37,8 +37,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; @@ -53,6 +51,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Sets; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -62,7 +61,8 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.slf4j.event.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index 0899dc6561893..60d4cca059ac9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -32,10 +32,10 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import 
org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NSConf; +import org.apache.hadoop.util.Sets; import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Tests datanode refresh namenode list functionality. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 2de9a4e5d691d..a7f13eacf6c24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -45,8 +45,6 @@ import java.util.Set; import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.permission.FsPermission; @@ -61,6 +59,7 @@ import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.Sets; import org.mockito.Mockito; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; @@ -68,8 +67,9 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.thirdparty.com.google.common.io.Files; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Utility functions for testing fsimage storage. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index b27a00d257f89..049a785481616 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -61,7 +61,6 @@ import java.util.function.Supplier; import org.apache.commons.logging.impl.Log4JLogger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSDataOutputStream; @@ -119,6 +118,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -128,8 +128,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.LoggerFactory; /** * A JUnit test for doing fsck. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index f00bbca4cbf01..8314a85020eed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Comparator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -53,7 +54,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestNNStorageRetentionManager { @@ -306,7 +306,7 @@ private void runTest(TestCaseDescription tc) throws IOException { Mockito.verify(mockPurger, Mockito.atLeast(0)) .markStale(staleLogsCaptor.capture()); - Set capturedPaths = Sets.newLinkedHashSet(); + Set capturedPaths = new LinkedHashSet<>(); // Check images for (FSImageFile captured : imagesPurgedCaptor.getAllValues()) { capturedPaths.add(fileToPath(captured.getFile())); @@ -336,9 +336,9 @@ private void runTest(TestCaseDescription tc) throws IOException { private class TestCaseDescription { private final Map dirRoots = Maps.newLinkedHashMap(); - private final Set expectedPurgedLogs = Sets.newLinkedHashSet(); - private final Set expectedPurgedImages = Sets.newLinkedHashSet(); - private final Set expectedStaleLogs = Sets.newLinkedHashSet(); + private final Set expectedPurgedLogs = new LinkedHashSet<>(); + private final Set expectedPurgedImages = new LinkedHashSet<>(); + private final Set expectedStaleLogs = new LinkedHashSet<>(); private class FakeRoot { final NameNodeDirType type; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index 9a9f5aaa7a3f5..51389c8336f16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -33,8 +33,6 @@ import java.util.Set; import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -51,13 +49,14 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This tests data recovery mode for the NameNode. 
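The TestNNStorageRetentionManager hunk above follows the same idea: Sets.newLinkedHashSet() becomes new LinkedHashSet<>(), while the surrounding hunks only swap the shaded Guava import for org.apache.hadoop.util.Sets. A small sketch of the constructor replacement, with made-up field names and values that are not part of the patch:

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class LinkedHashSetSketch {
      public static void main(String[] args) {
        // Before: Set<String> capturedPaths = Sets.newLinkedHashSet();
        // After:  the JDK constructor keeps the same insertion-order semantics.
        Set<String> capturedPaths = new LinkedHashSet<>();
        capturedPaths.add("/storage/current/edits_0000001-0000005");  // illustrative paths
        capturedPaths.add("/storage/current/fsimage_0000005");
        System.out.println(capturedPaths);  // printed in insertion order
      }
    }

The YARN-side commit that follows keeps the static-utility call style where set algebra is still needed, and at a few GPU and device allocation call sites it also renames Sets.difference to Sets.differenceInTreeSets, as the corresponding hunks below show.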
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java index 890c6fb3ebc79..d410d3b045500 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java @@ -38,7 +38,6 @@ import java.util.List; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -48,6 +47,7 @@ import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; From 00cbc2784d49222f6873fe77e4254a08471916f3 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 25 May 2021 14:40:20 +0530 Subject: [PATCH 350/666] HADOOP-17721. Replace Guava Sets usage by Hadoop's own Sets in hadoop-yarn-project (#3033) Signed-off-by: Takanobu Asanuma --- .../TestPlacementConstraintParser.java | 3 +- .../hadoop/yarn/service/webapp/ApiServer.java | 2 +- .../hadoop/yarn/service/TestApiServer.java | 2 +- .../yarn/service/utils/ServiceApiUtil.java | 2 +- .../hadoop/yarn/client/cli/TestYarnCLI.java | 2 +- .../pb/GetNodesToLabelsResponsePBImpl.java | 3 +- .../logaggregation/AggregatedLogFormat.java | 2 +- .../store/op/RemoveClusterLabelOp.java | 2 +- .../pb/ReplaceLabelsOnNodeRequestPBImpl.java | 3 +- .../hadoop/yarn/webapp/hamlet2/HamletGen.java | 3 +- .../yarn/api/BasePBImplRecordsTest.java | 2 +- .../pb/TestGetApplicationsRequestPBImpl.java | 2 +- ...estApplicationSubmissionContextPBImpl.java | 2 +- .../yarn/nodelabels/NodeLabelTestBase.java | 2 +- .../TestCommonNodeLabelsManager.java | 2 +- .../protocolrecords/TestProtocolRecords.java | 3 +- .../resources/gpu/GpuResourceAllocator.java | 8 +-- .../logaggregation/AppLogAggregatorImpl.java | 6 +- .../deviceframework/DeviceMappingManager.java | 10 ++-- .../resourceplugin/fpga/FpgaDiscoverer.java | 2 +- .../resourceplugin/gpu/GpuDiscoverer.java | 2 +- .../containermanager/TestAuxServices.java | 2 +- .../TestResourceLocalizationService.java | 3 +- .../gpu/TestNvidiaDockerV1CommandPlugin.java | 2 +- .../gpu/TestNvidiaDockerV2CommandPlugin.java | 2 +- .../server/resourcemanager/RMServerUtils.java | 6 +- .../monitor/SchedulingMonitorManager.java | 7 ++- .../MappingRuleValidationContextImpl.java | 2 +- .../scheduler/capacity/AbstractCSQueue.java | 2 +- .../scheduler/capacity/CSQueueUtils.java | 3 +- .../scheduler/capacity/LeafQueue.java | 6 +- .../scheduler/capacity/ParentQueue.java | 2 +- .../fair/allocation/AllocationFileParser.java | 2 +- .../converter/QueuePlacementConverter.java | 2 +- .../webapp/ApplicationsRequestBuilder.java | 2 +- .../webapp/RMWebAppFilter.java | 2 +- .../resourcemanager/TestAppManager.java | 2 +- .../resourcemanager/TestClientRMService.java | 6 +- .../server/resourcemanager/TestRMRestart.java | 8 +-- .../nodelabels/TestNodeAttributesManager.java | 2 +- .../placement/MockQueueHierarchyBuilder.java | 2 +- .../csmappingrule/TestMappingRule.java | 2 +- .../TestMappingRuleMatchers.java | 2 +- .../scheduler/TestAbstractYarnScheduler.java | 2 +- 
.../scheduler/TestSchedulerUtils.java | 6 +- .../capacity/CapacitySchedulerTestBase.java | 2 +- .../capacity/TestApplicationLimits.java | 6 +- .../capacity/TestCapacityScheduler.java | 2 +- ...estCapacitySchedulerAutoQueueCreation.java | 6 +- .../TestCapacitySchedulerLazyPreemption.java | 2 +- .../TestCapacitySchedulerMaxParallelApps.java | 3 +- .../TestCapacitySchedulerNodeLabelUpdate.java | 2 +- .../TestCapacitySchedulerWeightMode.java | 2 +- .../TestNodeLabelContainerAllocation.java | 2 +- .../scheduler/capacity/TestUtils.java | 6 +- ...stWorkPreservingRMRestartForNodeLabel.java | 2 +- .../TestAllocationTagsNamespace.java | 2 +- ...TestPlacementConstraintManagerService.java | 2 +- .../scheduler/fair/TestFairScheduler.java | 2 +- .../scheduler/fair/TestQueueManager.java | 2 +- .../fair/converter/TestFSQueueConverter.java | 4 +- .../TestApplicationsRequestBuilder.java | 2 +- .../webapp/TestRMWebServiceAppsNodelabel.java | 2 +- .../webapp/TestRMWebServicesApps.java | 2 +- .../TestRMWebServicesForCSWithPartitions.java | 2 +- .../FairSchedulerJsonVerifications.java | 2 +- .../FairSchedulerXmlVerifications.java | 2 +- .../ResourceRequestsXmlVerifications.java | 2 +- .../timeline/EntityGroupPlugInForTest.java | 2 +- .../cosmosdb/CosmosDBDocumentStoreReader.java | 2 +- ...TimelineReaderWebServicesHBaseStorage.java | 56 +++++++++++-------- .../collector/TestTimelineCollector.java | 2 +- .../TestTimelineReaderWebServicesUtils.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 32 +++++++++++ 74 files changed, 167 insertions(+), 133 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java index 708ac928b39a0..ffc83735de475 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.yarn.api.resource; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - import java.util.Iterator; import java.util.Map; import java.util.Set; import java.util.TreeSet; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode; import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint; import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java index 94b3a01e3f2ce..c7dc50455dde0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java @@ -19,13 +19,13 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import 
org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Inject; import com.google.inject.Singleton; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java index cf04e12d6d27d..7d895d1cdd6c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java @@ -32,9 +32,9 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.service.api.records.Artifact; import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum; import org.apache.hadoop.yarn.service.api.records.Component; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java index a67f3f61b4148..c1874c16f4bc7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -23,7 +23,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -31,6 +30,7 @@ import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.binding.RegistryUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.service.api.records.ComponentContainers; import org.apache.hadoop.yarn.service.api.records.ComponentState; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 1ed7dab2b8694..dc9137e8fac9c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -52,6 +52,7 @@ import org.apache.commons.cli.Options; import org.apache.commons.lang3.time.DateFormatUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -101,7 +102,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestYarnCLI { private static final Logger LOG = LoggerFactory.getLogger(TestYarnCLI.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java index 20732aa38010c..6b7c45afaccaa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java @@ -26,6 +26,7 @@ import java.util.Map.Entry; import java.util.Set; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; @@ -37,8 +38,6 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProtoOrBuilder; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class GetNodesToLabelsResponsePBImpl extends GetNodesToLabelsResponse { GetNodesToLabelsResponseProto proto = GetNodesToLabelsResponseProto diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java index 0fa9764b7bb1f..9f4bdb6c03285 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java @@ -65,6 +65,7 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.file.tfile.TFile; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LogAggregationContext; @@ -75,7 +76,6 @@ import org.slf4j.LoggerFactory; import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @Public @Evolving diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java index 5280afd41bee6..7057119e8d2ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.nodelabels.store.op; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos; import org.apache.hadoop.yarn.server.api.protocolrecords diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java index 66fddad2d792a..b2f491950d025 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java @@ -25,6 +25,7 @@ import java.util.Map.Entry; import java.util.Set; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; @@ -33,8 +34,6 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProtoOrBuilder; import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class ReplaceLabelsOnNodeRequestPBImpl extends ReplaceLabelsOnNodeRequest { ReplaceLabelsOnNodeRequestProto proto = ReplaceLabelsOnNodeRequestProto diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java index 722dd7347338b..0a8f016ac328a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.webapp.hamlet2; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - import java.io.IOException; import java.io.PrintWriter; import java.lang.annotation.Annotation; @@ -34,6 +32,7 @@ import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import 
org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.webapp.WebAppException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index 22b687c159e15..8014016ce5366 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -19,8 +19,8 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.Range; +import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.resource.PlacementConstraint; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java index ff9c778139fe5..35f9aa54e6478 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/TestGetApplicationsRequestPBImpl.java @@ -25,7 +25,7 @@ import java.util.Collection; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java index a0d8bb3d69250..b6b2dbb6fb339 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationSubmissionContextPBImpl.java @@ -25,7 +25,7 @@ import java.util.Collection; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java index 9cb2b95e95202..798d8835c81fd 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java @@ -25,12 +25,12 @@ import java.util.Map.Entry; import java.util.Set; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.junit.Assert; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class NodeLabelTestBase { public static void assertMapEquals(Map> expected, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java index d9f9389866e43..5a5cab85b47f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java @@ -29,6 +29,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -39,7 +40,6 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestCommonNodeLabelsManager extends NodeLabelTestBase { DummyCommonNodeLabelsManager mgr = null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java index f7a51bacf6e09..e89c604777d59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java @@ -31,6 +31,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -59,8 +60,6 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestProtocolRecords { @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java index 555cb68dc833b..e8ab050386450 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java @@ -22,9 +22,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; @@ -35,6 +33,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.AssignedGpuDevice; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuDevice; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.Serializable; @@ -271,7 +271,7 @@ private synchronized GpuAllocation internalAssignGpus(Container container) } return new GpuAllocation(assignedGpus, - Sets.difference(allowedGpuDevices, assignedGpus)); + Sets.differenceInTreeSets(allowedGpuDevices, assignedGpus)); } return new GpuAllocation(null, allowedGpuDevices); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java index 4cc0dc3c71369..73634a9f56b98 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java @@ -29,8 +29,6 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; @@ -40,6 +38,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -73,7 +72,8 @@ import 
org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class AppLogAggregatorImpl implements AppLogAggregator { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java index c34a55bd917ed..c7e63fec4c8fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java @@ -21,9 +21,7 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; @@ -33,6 +31,8 @@ import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePluginScheduler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.Serializable; @@ -207,7 +207,7 @@ private synchronized DeviceAllocation internalAssignDevices( } return new DeviceAllocation(resourceName, assignedDevices, - Sets.difference(allowedDevices, assignedDevices)); + Sets.differenceInTreeSets(allowedDevices, assignedDevices)); } return new DeviceAllocation(resourceName, null, allAllowedDevices.get(resourceName)); @@ -327,7 +327,7 @@ private void pickAndDoSchedule(Set allowed, } // Pass in unmodifiable set Set dpsAllocated = dps.allocateDevices( - Sets.difference(allowed, used.keySet()), + Sets.differenceInTreeSets(allowed, used.keySet()), count, ImmutableMap.copyOf(env)); if (dpsAllocated.size() != count) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java index 42686af09dcb1..20dc69d415113 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -43,7 +44,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class FpgaDiscoverer extends Configured { private static final Logger LOG = LoggerFactory.getLogger( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java index f9be9fe257db0..8d291569c5f12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java @@ -23,11 +23,11 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index 510af8958021b..0d67c13b64734 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -36,6 +36,7 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.records.AuxServiceRecord; import org.apache.hadoop.yarn.server.nodemanager.containermanager.records.AuxServiceRecords; import org.junit.After; @@ -47,7 +48,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.io.File; import java.io.IOException; import java.net.URL; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java index 38bcb640d2c46..b6fc0b9bf5813 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java @@ -66,6 +66,7 @@ import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; @@ -161,8 +162,6 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestResourceLocalizationService { static final Path basedir = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java index bd064239c456d..a6f55631addcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java index d9300c63b10eb..beb72bf9ef9ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 880201f87e9d3..79115902bcd3c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -30,14 +30,12 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; @@ -83,6 +81,8 @@ import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Utility methods to aid serving RM data through the REST and RPC APIs diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java index f3c10ea0b71ef..7e9d34d941b78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java @@ -18,15 +18,16 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.HashMap; import java.util.HashSet; import java.util.Map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java index bff6d4f4e0219..433af7199d831 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/MappingRuleValidationContextImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 250fcc716d645..be9a0e36391c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -25,7 +25,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Priority; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index a4034768387b1..e2865cf5c1bcb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -19,6 +19,7 @@ import java.util.Set; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; @@ -28,8 +29,6 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class CSQueueUtils { public final static float EPSILON = 0.0001f; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 89e7f83800376..71732f65e34cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -26,13 +26,12 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.time.DateUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -77,7 +76,8 @@ import 
org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 798c71037845d..6b6dd5afd3ed2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -30,7 +30,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java index 41478ec1d536d..bf76aac620841 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocation; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfigurationException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.ConfigurableResource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/QueuePlacementConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/QueuePlacementConverter.java index 65d950534026c..6ff19e21a6e71 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/QueuePlacementConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/QueuePlacementConverter.java @@ -19,7 +19,7 @@ import java.util.List; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.resourcemanager.placement.DefaultPlacementRule; import org.apache.hadoop.yarn.server.resourcemanager.placement.FSPlacementRule; import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java index 4d071a68038f4..1b5a35c8ae206 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java index ce0f90ab4dd93..4f9cfdc8d5132 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java @@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.http.IsActiveServlet; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -52,7 +53,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Injector; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 8e53b1a20904d..7919650c7b60e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -21,7 +21,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.QueueACL; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index a4629017a53f1..7806845a2edeb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -56,11 +56,10 @@ import java.util.concurrent.CyclicBarrier; import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; @@ -188,7 +187,8 @@ import org.mockito.stubbing.Answer; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestClientRMService { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index fa6cf3dbad4b5..c5c15e2f3dbae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -44,9 +44,6 @@ import java.util.concurrent.ConcurrentMap; import org.apache.commons.io.FileUtils; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.event.Level; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; @@ -61,6 +58,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; @@ -136,7 +134,9 @@ import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; public class TestRMRestart extends ParameterizedSchedulerTestBase { private static final Logger LOG = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java index e10f1d0189088..43df13e262ea9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.nodelabels; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeAttributeType; import org.apache.hadoop.yarn.conf.YarnConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java index b268e5acdb505..a102feae7c02e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/MockQueueHierarchyBuilder.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.compress.utils.Lists; +import org.apache.hadoop.util.Sets; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractCSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java index eb0441ca963d0..5421f47641962 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRule.java @@ -21,7 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java index 3e06863d26675..d245051600b22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/csmappingrule/TestMappingRuleMatchers.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement.csmappingrule; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import junit.framework.TestCase; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.resourcemanager.placement.VariableContext; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index 47174a47b05be..990b7a3c464ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -34,10 +34,10 @@ import java.util.stream.Collectors; import 
org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index ca3af3d423fce..bc41c359c2e1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -43,13 +43,12 @@ import java.util.Set; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; @@ -105,9 +104,10 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.junit.rules.ExpectedException; import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestSchedulerUtils { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java index 229bb0f0a74c6..f36b1e2486856 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index 84608effa317f..53416329211e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -35,10 +35,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -80,7 +79,8 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestApplicationLimits { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index d8dc4dc61a9e8..074549187ea97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -55,7 +55,7 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java index ece13c90bdd94..d5c7bc58ca84e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java @@ -17,10 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -77,6 +75,8 @@ import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.Assert; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Collections; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java index fceef183d7261..ee6a882a37709 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java index 38edda64462e9..39bd38030f2a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Set; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -49,8 +50,6 @@ import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestCapacitySchedulerMaxParallelApps { private CapacitySchedulerConfiguration conf; private MockRM rm; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java index d7f81c46260fc..e333c6a423841 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java @@ -26,6 +26,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -56,7 +57,6 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestCapacitySchedulerNodeLabelUpdate { private final int GB = 1024; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java index 77c8e053e3bc0..300e4cd097a5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index e21a60f3d7e9a..30ee24d6e5532 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -72,7 +73,6 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestNodeLabelContainerAllocation { private final int GB = 1024; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 357b19f53b28c..026206ac38f47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -18,10 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.*; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.Event; @@ -50,6 +48,8 @@ import org.apache.hadoop.yarn.util.resource.Resources; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java index 1259f385bfa83..a431d4700b7b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -52,7 +53,6 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestWorkPreservingRMRestartForNodeLabel { private Configuration conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java index 19cb24d1ac511..b54631c4c7b46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java @@ -16,7 +16,7 @@ * limitations under the License. */ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.junit.Assert; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java index 5951e190b88c3..d9376037de67d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java @@ -34,7 +34,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.SchedulingRequest; import org.apache.hadoop.yarn.api.resource.PlacementConstraint; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index a2ed6daea763f..c8a2225f5669a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol; @@ -27,6 +26,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.GroupMappingServiceProvider; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java index 190259b9f29de..3400822074e05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.Set; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -36,7 +37,6 @@ import org.mockito.Mockito; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Test the {@link FairScheduler} queue manager correct queue hierarchies diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java index cd562fb4502e2..7d615bbb2540b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java @@ -29,6 +29,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -47,9 +48,6 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - - /** * Unit tests for FSQueueConverter. * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java index 3c840d3f62841..93e800c543046 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java index 6373ccaa482c0..7397926732e95 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java @@ -28,6 +28,7 @@ import javax.ws.rs.core.MediaType; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; @@ -54,7 +55,6 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index fb77e2d764ff2..e22311ccbda65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; @@ -31,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.JettyUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java index 8ada04c0592af..86013a0c48f56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java @@ -40,8 +40,8 @@ import javax.xml.parsers.DocumentBuilderFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.http.JettyUtils; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Priority; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java index bffa5309eb303..bcdfbabd8b05a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.codehaus.jettison.json.JSONArray; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java index 4d0f361959939..35bfd0cbaa140 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.w3c.dom.Document; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java index 45bc46ebc74e5..40e626098f1dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java @@ -20,7 +20,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java index 0195c9de02381..f239fac32395b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.timeline; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId; import org.apache.hadoop.yarn.util.ConverterUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java index 6f17a9869535e..616376ae7b459 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.server.timelineservice.documentstore.reader.cosmosdb; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.microsoft.azure.cosmosdb.FeedOptions; import com.microsoft.azure.cosmosdb.FeedResponse; import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.NoDocumentFoundException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java index ad0fd64a7d0e3..b35b3dc8b79db 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java @@ -27,6 +27,8 @@ import java.net.URI; import java.text.DateFormat; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import 
java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -61,7 +63,6 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.ClientResponse.Status; @@ -262,20 +263,24 @@ private static void loadData() throws Exception { event54.setTimestamp(cTime); entity5.addEvent(event54); Map> isRelatedTo1 = new HashMap>(); - isRelatedTo1.put("type2", - Sets.newHashSet("entity21", "entity22", "entity23", "entity24")); - isRelatedTo1.put("type4", Sets.newHashSet("entity41", "entity42")); - isRelatedTo1.put("type1", Sets.newHashSet("entity14", "entity15")); - isRelatedTo1.put("type3", - Sets.newHashSet("entity31", "entity35", "entity32", "entity33")); + isRelatedTo1.put("type2", new HashSet<>(Arrays.asList("entity21", + "entity22", "entity23", "entity24"))); + isRelatedTo1.put("type4", new HashSet<>(Arrays.asList("entity41", + "entity42"))); + isRelatedTo1.put("type1", new HashSet<>(Arrays.asList("entity14", + "entity15"))); + isRelatedTo1.put("type3", new HashSet<>(Arrays.asList("entity31", + "entity35", "entity32", "entity33"))); entity5.addIsRelatedToEntities(isRelatedTo1); Map> relatesTo1 = new HashMap>(); - relatesTo1.put("type2", - Sets.newHashSet("entity21", "entity22", "entity23", "entity24")); - relatesTo1.put("type4", Sets.newHashSet("entity41", "entity42")); - relatesTo1.put("type1", Sets.newHashSet("entity14", "entity15")); - relatesTo1.put("type3", - Sets.newHashSet("entity31", "entity35", "entity32", "entity33")); + relatesTo1.put("type2", new HashSet<>(Arrays.asList("entity21", + "entity22", "entity23", "entity24"))); + relatesTo1.put("type4", new HashSet<>(Arrays.asList("entity41", + "entity42"))); + relatesTo1.put("type1", new HashSet<>(Arrays.asList("entity14", + "entity15"))); + relatesTo1.put("type3", new HashSet<>(Arrays.asList("entity31", + "entity35", "entity32", "entity33"))); entity5.addRelatesToEntities(relatesTo1); userEntities.addEntity(new SubApplicationEntity(entity5)); @@ -323,18 +328,25 @@ private static void loadData() throws Exception { event64.setTimestamp(cTime); entity6.addEvent(event64); Map> isRelatedTo2 = new HashMap>(); - isRelatedTo2.put("type2", - Sets.newHashSet("entity21", "entity22", "entity23", "entity24")); - isRelatedTo2.put("type5", Sets.newHashSet("entity51", "entity52")); - isRelatedTo2.put("type6", Sets.newHashSet("entity61", "entity66")); - isRelatedTo2.put("type3", Sets.newHashSet("entity31")); + isRelatedTo2.put("type2", new HashSet<>(Arrays.asList("entity21", + "entity22", "entity23", "entity24"))); + isRelatedTo2.put("type5", new HashSet<>(Arrays.asList("entity51", + "entity52"))); + isRelatedTo2.put("type6", new HashSet<>(Arrays.asList("entity61", + "entity66"))); + isRelatedTo2.put("type3", + new HashSet<>(Collections.singletonList("entity31"))); entity6.addIsRelatedToEntities(isRelatedTo2); Map> relatesTo2 = new HashMap>(); relatesTo2.put("type2", - Sets.newHashSet("entity21", "entity22", "entity23", "entity24")); - relatesTo2.put("type5", Sets.newHashSet("entity51", "entity52")); - relatesTo2.put("type6", Sets.newHashSet("entity61", "entity66")); - relatesTo2.put("type3", Sets.newHashSet("entity31")); + new HashSet<>(Arrays.asList("entity21", "entity22", "entity23", + "entity24"))); + relatesTo2.put("type5", new HashSet<>(Arrays.asList("entity51", + "entity52"))); + relatesTo2.put("type6", new 
HashSet<>(Arrays.asList("entity61", + "entity66"))); + relatesTo2.put("type3", + new HashSet<>(Collections.singletonList("entity31"))); entity6.addRelatesToEntities(relatesTo2); userEntities.addEntity(new SubApplicationEntity(entity6)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java index 5ace793e3ecf1..96636f8bf8c46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineDomain; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -33,7 +34,6 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.mockito.internal.stubbing.answers.AnswersWithDelay; import org.mockito.internal.stubbing.answers.Returns; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java index 834eb87c2478a..882189c68201b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp; import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter; @@ -34,8 +35,6 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; - public class TestTimelineReaderWebServicesUtils { private static void verifyFilterList(String expr, TimelineFilterList list, TimelineFilterList expectedList) throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index e97e35608553a..5cfc0745144b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -78,6 +78,38 @@ 
org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto:org.apache.hadoop.yarn.service + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Sets rather than Guava provided Sets + + org.apache.hadoop.thirdparty.com.google.common.collect.Sets + com.google.common.collect.Sets + + + + + + + From fdbb3d374b3fe1fd53117f8f175b127b1c3c364c Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 25 May 2021 10:25:41 +0100 Subject: [PATCH 351/666] HADOOP-17511. Add audit/telemetry logging to S3A connector (#2807) The S3A connector supports "an auditor", a plugin which is invoked at the start of every filesystem API call, and whose issued "audit span" provides a context for all REST operations against the S3 object store. The standard auditor sets the HTTP Referrer header on the requests with information about the API call, such as process ID, operation name, path, and even job ID. If the S3 bucket is configured to log requests, this information will be preserved there and so can be used to analyze and troubleshoot storage IO. Contributed by Steve Loughran. --- .../hadoop/fs/audit/AuditConstants.java | 108 ++ .../hadoop/fs/audit/AuditStatisticNames.java | 50 + .../hadoop/fs/audit/CommonAuditContext.java | 288 ++++ .../apache/hadoop/fs/audit/package-info.java | 29 + .../org/apache/hadoop/fs/shell/Command.java | 3 + .../org/apache/hadoop/fs/shell/PathData.java | 18 +- .../fs/statistics/StoreStatisticNames.java | 10 + .../impl/EmptyIOStatisticsStore.java | 182 +++ .../statistics/impl/IOStatisticsBinding.java | 42 + .../hadoop/fs/store}/LogExactlyOnce.java | 12 +- .../store/audit/ActiveThreadSpanSource.java | 38 + .../fs/store/audit/AuditEntryPoint.java | 55 + .../hadoop/fs/store/audit/AuditSpan.java | 109 ++ .../fs/store/audit/AuditSpanSource.java | 50 + .../fs/store/audit/AuditingFunctions.java | 121 ++ .../store/audit/HttpReferrerAuditHeader.java | 503 ++++++ .../hadoop/fs/store/audit/package-info.java | 29 + .../service/launcher/ServiceLauncher.java | 2 + .../org/apache/hadoop/util/ToolRunner.java | 5 + .../util/functional/RemoteIterators.java | 34 +- .../fs/audit/TestCommonAuditContext.java | 161 ++ .../hadoop/fs/contract/ContractTestUtils.java | 10 +- .../CredentialInitializationException.java | 9 + .../org/apache/hadoop/fs/s3a/Invoker.java | 38 +- .../org/apache/hadoop/fs/s3a/Listing.java | 131 +- .../apache/hadoop/fs/s3a/MultipartUtils.java | 110 +- .../hadoop/fs/s3a/S3ABlockOutputStream.java | 12 +- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 1382 +++++++++++------ .../apache/hadoop/fs/s3a/S3AInputStream.java | 45 +- .../hadoop/fs/s3a/S3AInstrumentation.java | 72 +- .../hadoop/fs/s3a/S3ALocatedFileStatus.java | 1 + .../hadoop/fs/s3a/S3AReadOpContext.java | 18 +- .../org/apache/hadoop/fs/s3a/S3AUtils.java | 26 +- .../apache/hadoop/fs/s3a/S3ClientFactory.java | 5 + .../org/apache/hadoop/fs/s3a/Statistic.java | 72 +- .../hadoop/fs/s3a/WriteOperationHelper.java | 290 ++-- .../apache/hadoop/fs/s3a/WriteOperations.java | 18 +- .../hadoop/fs/s3a/api/RequestFactory.java | 296 ++++ .../hadoop/fs/s3a/api/package-info.java | 34 + .../fs/s3a/audit/AWSAuditEventCallbacks.java | 160 ++ .../fs/s3a/audit/AWSRequestAnalyzer.java | 296 ++++ .../fs/s3a/audit/AuditFailureException.java | 41 + .../hadoop/fs/s3a/audit/AuditIntegration.java | 137 ++ .../hadoop/fs/s3a/audit/AuditManagerS3A.java | 92 ++ 
.../hadoop/fs/s3a/audit/AuditSpanS3A.java | 29 + .../hadoop/fs/s3a/audit/OperationAuditor.java | 73 + .../fs/s3a/audit/OperationAuditorOptions.java | 74 + .../fs/s3a/audit/S3AAuditConstants.java | 104 ++ .../hadoop/fs/s3a/audit/S3LogParser.java | 309 ++++ .../hadoop/fs/s3a/audit/S3LogVerbs.java | 51 + .../s3a/audit/impl/AbstractAuditSpanImpl.java | 99 ++ .../audit/impl/AbstractOperationAuditor.java | 121 ++ .../s3a/audit/impl/ActiveAuditManagerS3A.java | 773 +++++++++ .../fs/s3a/audit/impl/LoggingAuditor.java | 435 ++++++ .../s3a/audit/impl/NoopAuditManagerS3A.java | 185 +++ .../hadoop/fs/s3a/audit/impl/NoopAuditor.java | 99 ++ .../hadoop/fs/s3a/audit/impl/NoopSpan.java | 109 ++ .../audit/impl/S3AInternalAuditConstants.java | 44 + .../fs/s3a/audit/impl/package-info.java | 28 + .../hadoop/fs/s3a/audit/package-info.java | 41 + .../fs/s3a/commit/AbstractS3ACommitter.java | 66 +- .../fs/s3a/commit/CommitOperations.java | 31 +- .../hadoop/fs/s3a/commit/CommitUtils.java | 24 + .../commit/InternalCommitterConstants.java | 6 + .../fs/s3a/commit/MagicCommitIntegration.java | 15 +- .../s3a/commit/magic/MagicCommitTracker.java | 2 +- .../s3a/commit/staging/StagingCommitter.java | 2 + .../fs/s3a/impl/AbstractStoreOperation.java | 40 +- .../hadoop/fs/s3a/impl/CallableSupplier.java | 52 +- .../fs/s3a/impl/ChangeDetectionPolicy.java | 1 + .../hadoop/fs/s3a/impl/ContextAccessors.java | 21 +- .../hadoop/fs/s3a/impl/DeleteOperation.java | 17 +- .../fs/s3a/impl/ExecutingStoreOperation.java | 40 +- .../s3a/impl/GetContentSummaryOperation.java | 208 +++ .../hadoop/fs/s3a/impl/HeaderProcessing.java | 44 +- .../hadoop/fs/s3a/impl/InternalConstants.java | 10 + .../s3a/impl/ListingOperationCallbacks.java | 26 +- .../hadoop/fs/s3a/impl/MkdirOperation.java | 184 +++ .../hadoop/fs/s3a/impl/RenameOperation.java | 22 +- .../fs/s3a/impl/RequestFactoryImpl.java | 695 +++++++++ .../hadoop/fs/s3a/impl/StoreContext.java | 60 +- .../fs/s3a/impl/StoreContextBuilder.java | 18 +- .../s3a/s3guard/DumpS3GuardDynamoTable.java | 32 +- .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 4 +- .../hadoop/fs/s3a/s3guard/S3GuardTool.java | 57 +- .../hadoop/fs/s3a/select/SelectBinding.java | 29 +- .../impl/ForwardingIOStatisticsStore.java | 186 +++ .../hadoop/fs/s3a/tools/MarkerTool.java | 9 +- .../markdown/tools/hadoop-aws/auditing.md | 389 +++++ .../tools/hadoop-aws/auditing_architecture.md | 323 ++++ .../site/markdown/tools/hadoop-aws/index.md | 2 + .../tools/hadoop-aws/audit-architecture.png | Bin 0 -> 55701 bytes .../fs/contract/s3a/ITestS3AContractSeek.java | 2 +- .../hadoop/fs/s3a/AbstractS3ATestBase.java | 46 + .../hadoop/fs/s3a/ITestS3AEmptyDirectory.java | 7 +- .../fs/s3a/ITestS3AFailureHandling.java | 13 +- .../hadoop/fs/s3a/ITestS3AMiscOperations.java | 23 +- .../hadoop/fs/s3a/ITestS3AMultipartUtils.java | 4 +- .../fs/s3a/ITestS3ARemoteFileChanged.java | 2 +- .../hadoop/fs/s3a/ITestS3GuardEmptyDirs.java | 45 +- .../hadoop/fs/s3a/MockS3AFileSystem.java | 44 +- .../hadoop/fs/s3a/MultipartTestUtils.java | 57 +- .../apache/hadoop/fs/s3a/S3ATestUtils.java | 46 - .../fs/s3a/TestS3ABlockOutputStream.java | 11 +- .../fs/s3a/audit/AbstractAuditingTest.java | 213 +++ .../fs/s3a/audit/AccessCheckingAuditor.java | 53 + .../hadoop/fs/s3a/audit/AuditTestSupport.java | 123 ++ .../fs/s3a/audit/ITestAuditAccessChecks.java | 201 +++ .../fs/s3a/audit/ITestAuditManager.java | 135 ++ .../fs/s3a/audit/SimpleAWSRequestHandler.java | 54 + .../fs/s3a/audit/TestAuditIntegration.java | 216 +++ .../fs/s3a/audit/TestAuditSpanLifecycle.java | 133 ++ 
.../audit/TestHttpReferrerAuditHeader.java | 323 ++++ .../fs/s3a/audit/TestLoggingAuditor.java | 203 +++ .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 3 +- .../fs/s3a/commit/AbstractCommitITest.java | 12 +- .../s3a/commit/AbstractITCommitProtocol.java | 3 +- .../fs/s3a/commit/ITestCommitOperations.java | 3 +- .../hadoop/fs/s3a/impl/ITestXAttrCost.java | 3 +- .../fs/s3a/impl/TestHeaderProcessing.java | 27 +- .../s3a/impl/TestPartialDeleteFailures.java | 16 +- .../fs/s3a/impl/TestRequestFactory.java | 185 +++ .../s3a/performance/AbstractS3ACostTest.java | 33 +- .../ITestDirectoryMarkerListing.java | 13 +- .../ITestS3AMiscOperationCost.java | 143 ++ .../fs/s3a/performance/ITestS3AMkdirCost.java | 185 +++ .../performance/OperationCostValidator.java | 3 +- .../fs/s3a/s3guard/ITestS3GuardFsck.java | 2 - .../fs/s3a/s3guard/ITestS3GuardToolLocal.java | 47 +- .../s3a/scale/AbstractSTestS3AHugeFiles.java | 3 +- .../scale/ITestS3ADirectoryPerformance.java | 137 +- .../MinimalListingOperationCallbacks.java | 10 +- .../src/test/resources/core-site.xml | 13 + .../src/test/resources/log4j.properties | 6 + 134 files changed, 11982 insertions(+), 1152 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditStatisticNames.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/package-info.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatisticsStore.java rename {hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl => hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store}/LogExactlyOnce.java (81%) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/ActiveThreadSpanSource.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditEntryPoint.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpan.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpanSource.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditingFunctions.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/HttpReferrerAuditHeader.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/package-info.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/audit/TestCommonAuditContext.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/package-info.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSAuditEventCallbacks.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSRequestAnalyzer.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditFailureException.java create mode 100644 
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditIntegration.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditManagerS3A.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditSpanS3A.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditor.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditorOptions.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3AAuditConstants.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogParser.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogVerbs.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractAuditSpanImpl.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractOperationAuditor.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/ActiveAuditManagerS3A.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/LoggingAuditor.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditManagerS3A.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditor.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopSpan.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/S3AInternalAuditConstants.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/package-info.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/package-info.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/GetContentSummaryOperation.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java create mode 100644 hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/ForwardingIOStatisticsStore.java create mode 100644 hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing.md create mode 100644 hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing_architecture.md create mode 100644 hadoop-tools/hadoop-aws/src/site/resources/tools/hadoop-aws/audit-architecture.png create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AbstractAuditingTest.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AccessCheckingAuditor.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AuditTestSupport.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/SimpleAWSRequestHandler.java create mode 100644 
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditIntegration.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java new file mode 100644 index 0000000000000..d9629e388b384 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditConstants.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.audit; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Constants related to auditing. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class AuditConstants { + + private AuditConstants() { + } + + /** + * The host from where requests originate: {@value}. + * example.org is used as the IETF require that it never resolves. + * This isn't always met by some mobile/consumer DNS services, but + * we don't worry about that. What is important is that + * a scan for "example.org" in the logs will exclusively find + * entries from this referrer. + */ + public static final String REFERRER_ORIGIN_HOST = "audit.example.org"; + + /** + * Header: Command: {@value}. + * Set by tool runner. + */ + public static final String PARAM_COMMAND = "cm"; + + /** + * Header: FileSystem ID: {@value}. + */ + public static final String PARAM_FILESYSTEM_ID = "fs"; + + /** + * Header: operation ID: {@value}. + */ + public static final String PARAM_ID = "id"; + + /** + * JobID query header: {@value}. + */ + public static final String PARAM_JOB_ID = "ji"; + + /** + * Header: operation: {@value}. + * These should be from StoreStatisticNames or similar, + * and are expected to be at the granularity of FS + * API operations. + */ + public static final String PARAM_OP = "op"; + + /** + * Header: first path of operation: {@value}. 
+ */ + public static final String PARAM_PATH = "p1"; + + /** + * Header: second path of operation: {@value}. + */ + public static final String PARAM_PATH2 = "p2"; + + /** + * Header: Principal: {@value}. + */ + public static final String PARAM_PRINCIPAL = "pr"; + + /** + * Header: Process ID: {@value}. + */ + public static final String PARAM_PROCESS = "ps"; + + /** + * Thread 0: the thread which created a span {@value}. + */ + public static final String PARAM_THREAD0 = "t0"; + + /** + * Thread 1: the thread making the S3 request: {@value}. + */ + public static final String PARAM_THREAD1 = "t1"; + + /** + * Timestamp of span creation: {@value}. + */ + public static final String PARAM_TIMESTAMP = "ts"; + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditStatisticNames.java new file mode 100644 index 0000000000000..0ee9d626bd9c7 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/AuditStatisticNames.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.audit; + +/** + * Statistic Names for Auditing. + */ +public final class AuditStatisticNames { + + private AuditStatisticNames() { + } + + /** + * Audit failure: {@value}. + */ + public static final String AUDIT_FAILURE = "audit_failure"; + + /** + * A request was executed and the auditor invoked: {@value}. + */ + public static final String AUDIT_REQUEST_EXECUTION + = "audit_request_execution"; + + /** + * Audit span created: {@value}. + */ + public static final String AUDIT_SPAN_CREATION = "audit_span_creation"; + + /** + * Access check during audit rejected: {@value}. + */ + public static final String AUDIT_ACCESS_CHECK_FAILURE + = "audit_access_check_failure"; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java new file mode 100644 index 0000000000000..11681546e3d0a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/CommonAuditContext.java @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.audit; + +import java.util.Iterator; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_COMMAND; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PROCESS; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_THREAD1; + +/** + * The common audit context is a map of common context information + * which can be used with any audit span. + * This context is shared across all Filesystems within the + * thread. + * Audit spans will be created with a reference to the current + * context of their thread; + * That reference is retained even as they are moved across threads, so + * context information (including thread ID Java runtime). + * + * The Global context entries are a set of key-value pairs which span + * all threads; the {@code HttpReferrerAuditHeader} picks these + * up automatically. It is intended for minimal use of + * shared constant values (process ID, entry point). + * + * An attribute set in {@link #setGlobalContextEntry(String, String)} + * will be set across all audit spans in all threads. + * + * The {@link #noteEntryPoint(Object)} method should be + * used in entry points (ToolRunner.run, etc). It extracts + * the final element of the classname and attaches that + * to the global context with the attribute key + * {@link AuditConstants#PARAM_COMMAND}, if not already + * set. + * This helps identify the application being executued. + * + * All other values set are specific to this context, which + * is thread local. + * The attributes which can be added to ths common context include + * evaluator methods which will be evaluated in whichever thread + * invokes {@link #getEvaluatedEntries()} and then evaluates them. + * That map of evaluated options may evaluated later, in a different + * thread. + * + * For setting and clearing thread-level options, use + * {@link #currentAuditContext()} to get the thread-local + * context for the caller, which can then be manipulated. + * + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class CommonAuditContext { + + /** + * Process ID; currently built from UUID and timestamp. + */ + public static final String PROCESS_ID = UUID.randomUUID().toString(); + + /** + * Context values which are global. + * To be used very sparingly. + */ + private static final Map GLOBAL_CONTEXT_MAP = + new ConcurrentHashMap<>(); + + /** + * Map of data. Concurrent so when shared across threads + * there are no problems. + * Supplier operations must themselves be thread safe. + */ + private final Map> evaluatedEntries = + new ConcurrentHashMap<>(); + + static { + // process ID is fixed. + setGlobalContextEntry(PARAM_PROCESS, PROCESS_ID); + } + + /** + * Thread local context. + * Use a weak reference just to keep memory costs down. 
+ * The S3A committers all have a strong reference, so if they are + * retained, context is retained. + * If a span retains the context, then it will also stay valid until + * the span is finalized. + */ + private static final ThreadLocal ACTIVE_CONTEXT = + ThreadLocal.withInitial(() -> createInstance()); + + private CommonAuditContext() { + } + + /** + * Put a context entry. + * @param key key + * @param value new value + * @return old value or null + */ + public Supplier put(String key, String value) { + return evaluatedEntries.put(key, () -> value); + } + + /** + * Put a context entry dynamically evaluated on demand. + * @param key key + * @param value new value + * @return old value or null + */ + public Supplier put(String key, Supplier value) { + return evaluatedEntries.put(key, value); + } + + /** + * Remove a context entry. + * @param key key + */ + public void remove(String key) { + evaluatedEntries.remove(key); + } + + /** + * Get a context entry. + * @param key key + * @return value or null + */ + public String get(String key) { + Supplier supplier = evaluatedEntries.get(key); + return supplier != null + ? supplier.get() + : null; + } + + /** + * Rest the context; will set the standard options again. + * Primarily for testing. + */ + public void reset() { + evaluatedEntries.clear(); + init(); + } + + /** + * Initialize. + */ + private void init() { + + // thread 1 is dynamic + put(PARAM_THREAD1, () -> currentThreadID()); + } + + /** + * Does the context contain a specific key? + * @param key key + * @return true if it is in the context. + */ + public boolean containsKey(String key) { + return evaluatedEntries.containsKey(key); + } + + /** + * Demand invoked to create the instance for this thread. + * @return an instance. + */ + private static CommonAuditContext createInstance() { + CommonAuditContext context = new CommonAuditContext(); + context.init(); + return context; + } + + /** + * Get the current common audit context. Thread local. + * @return the audit context of this thread. + */ + public static CommonAuditContext currentAuditContext() { + return ACTIVE_CONTEXT.get(); + } + + /** + * A thread ID which is unique for this process and shared across all + * S3A clients on the same thread, even those using different FS instances. + * @return a thread ID for reporting. + */ + public static String currentThreadID() { + return Long.toString(Thread.currentThread().getId()); + } + + /** + * Get the evaluated operations. + * This is the map unique to this context. + * @return the operations map. + */ + public Map> getEvaluatedEntries() { + return evaluatedEntries; + } + + /** + * Set a global entry. + * @param key key + * @param value value + */ + public static void setGlobalContextEntry(String key, String value) { + GLOBAL_CONTEXT_MAP.put(key, value); + } + + /** + * Get a global entry. + * @param key key + * @return value or null + */ + public static String getGlobalContextEntry(String key) { + return GLOBAL_CONTEXT_MAP.get(key); + } + + /** + * Remove a global entry. + * @param key key to clear. + */ + public static void removeGlobalContextEntry(String key) { + GLOBAL_CONTEXT_MAP.remove(key); + } + + /** + * Add the entry point as a context entry with the key + * {@link AuditConstants#PARAM_COMMAND} + * if it has not already been recorded. + * This is called via ToolRunner but may be used at any + * other entry point. + * @param tool object loaded/being launched. 
+ */ + public static void noteEntryPoint(Object tool) { + if (tool != null && !GLOBAL_CONTEXT_MAP.containsKey(PARAM_COMMAND)) { + String classname = tool.getClass().toString(); + int lastDot = classname.lastIndexOf('.'); + int l = classname.length(); + if (lastDot > 0 && lastDot < (l - 1)) { + String name = classname.substring(lastDot + 1, l); + setGlobalContextEntry(PARAM_COMMAND, name); + } + } + } + + /** + * Get an iterator over the global entries. + * Thread safe. + * @return an iterable to enumerate the values. + */ + public static Iterable> + getGlobalContextEntries() { + return new GlobalIterable(); + } + + /** + * Iterable to the global iterator. Avoids serving + * up full access to the map. + */ + private static final class GlobalIterable + implements Iterable> { + + @Override + public Iterator> iterator() { + return GLOBAL_CONTEXT_MAP.entrySet().iterator(); + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/package-info.java new file mode 100644 index 0000000000000..16c224940dd03 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/audit/package-info.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Public classes for adding information to any auditing information + * picked up by filesystem clients. 
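A minimal sketch of how a client might populate this audit context, assuming only the CommonAuditContext and AuditConstants APIs introduced above; the class name, attribute keys and values below are hypothetical:

import java.util.function.Supplier;

import org.apache.hadoop.fs.audit.AuditConstants;
import org.apache.hadoop.fs.audit.CommonAuditContext;

import static org.apache.hadoop.fs.audit.CommonAuditContext.currentAuditContext;

/** Hypothetical driver showing how audit context entries might be set. */
public final class AuditContextExample {

  public static void main(String[] args) {
    // record the entry point once per process; surfaces as the "cm" attribute
    CommonAuditContext.noteEntryPoint(new AuditContextExample());

    // a global (all-threads) entry; to be used sparingly
    CommonAuditContext.setGlobalContextEntry("cluster", "test-cluster");

    // thread-local entries: a fixed value and a supplier evaluated on demand
    CommonAuditContext context = currentAuditContext();
    context.put(AuditConstants.PARAM_JOB_ID, "job_0001");
    Supplier<String> freeMemory =
        () -> Long.toString(Runtime.getRuntime().freeMemory());
    context.put("free-memory", freeMemory);

    // ... filesystem operations issued from this thread now carry this context ...

    context.remove(AuditConstants.PARAM_JOB_ID);
  }
}

Thread-local entries are picked up by spans created on the same thread, while global entries appear in every span, as the class javadoc above describes.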
+ * + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +package org.apache.hadoop.fs.audit; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java index c81825776a613..0bdb47730a929 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java @@ -38,6 +38,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.util.functional.RemoteIterators.cleanupRemoteIterator; + /** * An abstract class for the execution of a file system command */ @@ -361,6 +363,7 @@ protected void processPaths(PathData parent, } } } + cleanupRemoteIterator(itemsIterator); } private void processPathInternal(PathData item) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java index dad54ea07bdf1..1ff8d8f0494a1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java @@ -39,6 +39,8 @@ import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.fs.RemoteIterator; +import static org.apache.hadoop.util.functional.RemoteIterators.mappingRemoteIterator; + /** * Encapsulates a Path (path), its FileStatus (stat), and its FileSystem (fs). * PathData ensures that the returned path string will be the same as the @@ -287,20 +289,8 @@ public RemoteIterator getDirectoryContentsIterator() throws IOException { checkIfExists(FileTypeRequirement.SHOULD_BE_DIRECTORY); final RemoteIterator stats = this.fs.listStatusIterator(path); - return new RemoteIterator() { - - @Override - public boolean hasNext() throws IOException { - return stats.hasNext(); - } - - @Override - public PathData next() throws IOException { - FileStatus file = stats.next(); - String child = getStringForChildPath(file.getPath()); - return new PathData(fs, child, file); - } - }; + return mappingRemoteIterator(stats, + file -> new PathData(fs, getStringForChildPath(file.getPath()), file)); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java index 95144393585a1..9ec8dcdb3dc9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java @@ -40,6 +40,9 @@ public final class StoreStatisticNames { /** {@value}. */ public static final String OP_ABORT = "op_abort"; + /** access() API call {@value}. */ + public static final String OP_ACCESS = "op_access"; + /** {@value}. */ public static final String OP_APPEND = "op_append"; @@ -161,6 +164,10 @@ public final class StoreStatisticNames { public static final String DELEGATION_TOKENS_ISSUED = "delegation_tokens_issued"; + /** Probe for store existing: {@value}. 
*/ + public static final String STORE_EXISTS_PROBE + = "store_exists_probe"; + /** Requests throttled and retried: {@value}. */ public static final String STORE_IO_THROTTLED = "store_io_throttled"; @@ -379,6 +386,9 @@ public final class StoreStatisticNames { public static final String MULTIPART_UPLOAD_STARTED = "multipart_upload_started"; + public static final String MULTIPART_UPLOAD_LIST + = "multipart_upload_list"; + private StoreStatisticNames() { } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatisticsStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatisticsStore.java new file mode 100644 index 0000000000000..c970546e6dcb8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatisticsStore.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.MeanStatistic; + +import static java.util.Collections.emptyMap; + +/** + * An Empty IOStatisticsStore implementation. + */ +final class EmptyIOStatisticsStore implements IOStatisticsStore { + + /** + * The sole instance of this class. + */ + private static final EmptyIOStatisticsStore INSTANCE = + new EmptyIOStatisticsStore(); + /** + * Get the single instance of this class. + * @return a shared, empty instance. 
+ */ + static IOStatisticsStore getInstance() { + return INSTANCE; + } + + private EmptyIOStatisticsStore() { + } + + @Override + public Map counters() { + return emptyMap(); + } + + @Override + public Map gauges() { + return emptyMap(); + } + + @Override + public Map minimums() { + return emptyMap(); + } + + @Override + public Map maximums() { + return emptyMap(); + } + + @Override + public Map meanStatistics() { + return emptyMap(); + } + + @Override + public boolean aggregate(@Nullable final IOStatistics statistics) { + return false; + } + + @Override + public long incrementCounter(final String key, final long value) { + return 0; + } + + @Override + public void setCounter(final String key, final long value) { + + } + + @Override + public void setGauge(final String key, final long value) { + + } + + @Override + public long incrementGauge(final String key, final long value) { + return 0; + } + + @Override + public void setMaximum(final String key, final long value) { + + } + + @Override + public long incrementMaximum(final String key, final long value) { + return 0; + } + + @Override + public void setMinimum(final String key, final long value) { + + } + + @Override + public long incrementMinimum(final String key, final long value) { + return 0; + } + + @Override + public void addMinimumSample(final String key, final long value) { + + } + + @Override + public void addMaximumSample(final String key, final long value) { + + } + + @Override + public void setMeanStatistic(final String key, final MeanStatistic value) { + + } + + @Override + public void addMeanStatisticSample(final String key, final long value) { + + } + + @Override + public void reset() { + + } + + @Override + public AtomicLong getCounterReference(final String key) { + return null; + } + + @Override + public AtomicLong getMaximumReference(final String key) { + return null; + } + + @Override + public AtomicLong getMinimumReference(final String key) { + return null; + } + + @Override + public AtomicLong getGaugeReference(final String key) { + return null; + } + + @Override + public MeanStatistic getMeanStatistic(final String key) { + return null; + } + + @Override + public void addTimedOperation(final String prefix, + final long durationMillis) { + + } + + @Override + public void addTimedOperation(final String prefix, final Duration duration) { + + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java index c3507dbc73ef4..1aece01e2fe31 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java @@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.function.Function; +import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @@ -97,6 +98,15 @@ public static IOStatistics emptyStatistics() { return EmptyIOStatistics.getInstance(); } + /** + * Get the shared instance of the immutable empty statistics + * store. + * @return an empty statistics object. + */ + public static IOStatisticsStore emptyStatisticsStore() { + return EmptyIOStatisticsStore.getInstance(); + } + /** * Take an IOStatistics instance and wrap it in a source. 
* @param statistics statistics. @@ -573,6 +583,38 @@ public static Callable trackDurationOfCallable( }; } + /** + * Given a Java supplier, evaluate it while + * tracking the duration of the operation and success/failure. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param input input callable. + * @param return type. + * @return the output of the supplier. + */ + public static B trackDurationOfSupplier( + @Nullable DurationTrackerFactory factory, + String statistic, + Supplier input) { + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. + DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + return input.get(); + } catch (RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after any catch() call will have + // set the failed flag. + tracker.close(); + } + } + /** * Create the tracker. If the factory is null, a stub * tracker is returned. diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/LogExactlyOnce.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/LogExactlyOnce.java similarity index 81% rename from hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/LogExactlyOnce.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/LogExactlyOnce.java index 54a8836d02ba4..04cd5111e90a2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/LogExactlyOnce.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/LogExactlyOnce.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.fs.s3a.impl; +package org.apache.hadoop.fs.store; import java.util.concurrent.atomic.AtomicBoolean; @@ -39,4 +39,14 @@ public void warn(String format, Object...args) { log.warn(format, args); } } + public void info(String format, Object...args) { + if (!logged.getAndSet(true)) { + log.info(format, args); + } + } + public void error(String format, Object...args) { + if (!logged.getAndSet(true)) { + log.error(format, args); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/ActiveThreadSpanSource.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/ActiveThreadSpanSource.java new file mode 100644 index 0000000000000..4ddb8e1f29072 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/ActiveThreadSpanSource.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
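A rough sketch of how the new trackDurationOfSupplier helper above might be invoked; the statistic name is hypothetical, and the null factory (permitted by the @Nullable annotation) yields a stub tracker rather than recording into a real IOStatisticsStore:

import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding;

/** Hypothetical caller of the new duration-tracking helper. */
public final class DurationTrackingExample {

  public static void main(String[] args) {
    // factory is @Nullable: null gives a stub tracker; a real
    // IOStatisticsStore would record duration and success/failure
    // under the (hypothetical) statistic name "op_example"
    long result = IOStatisticsBinding.trackDurationOfSupplier(
        null,
        "op_example",
        () -> expensiveComputation());
    System.out.println("result = " + result);
  }

  private static long expensiveComputation() {
    return 42L; // placeholder for real work
  }
}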
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.store.audit; + +/** + * Interface to get the active thread span. + * This can be used to collect the active span to + * propagate it into other threads. + * + * FileSystems which track their active span may implement + * this and offer their active span. + */ +public interface ActiveThreadSpanSource { + + /** + * The active span. This may not be a valid span, i.e. there is no guarantee + * that {@code getActiveAuditSpan().isValidSpan()} is true, but + * implementations MUST always return a non-null span. + * @return the currently active span. + */ + T getActiveAuditSpan(); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditEntryPoint.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditEntryPoint.java new file mode 100644 index 0000000000000..6210dd0c8987c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditEntryPoint.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.store.audit; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +/** + * A marker attribute simply to highlight which of the methods + * in a FileSystem why are audit entry points. + *

      + *
+ * 1. A FS method is an AuditEntryPoint if, on invocation it
+ *    creates and activates an Audit Span for that FS.
+ * 2. The audit span SHOULD be deactivated before returning.
+ * 3. Objects returned by the API call which go on
+ *    to make calls of the filesystem MUST perform
+ *    all IO within the same audit span.
+ * 4. Audit Entry points SHOULD NOT invoke other Audit Entry Points.
+ *    This is to ensure the original audit span information
+ *    is not replaced.
+ *
    + * FileSystem methods the entry point then invokes + * SHOULD NOT invoke audit entry points internally. + * + * All external methods MUST be audit entry points. + */ +@Documented +@Retention(RetentionPolicy.SOURCE) +public @interface AuditEntryPoint { +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpan.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpan.java new file mode 100644 index 0000000000000..ecdaf71c11132 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpan.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.store.audit; + +import java.io.Closeable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This is a span created by an {@link AuditSpanSource}. + * An implementation of a span may carry context which can be picked + * up by the filesystem when activated. + * Each FS can have one active span per thread. + * Different filesystem instances SHALL have different active + * spans (if they support them) + * A span is activated in a thread when {@link #activate()} + * is called. + * The span stays active in that thread until {@link #deactivate()} + * is called. + * When deactivated in one thread, it MAY still be active in others. + * There's no explicit "end of span"; this is too hard to manage in + * terms of API lifecycle. + * Similarly, there's no stack of spans. Once a span is activated, + * the previous span is forgotten about. + * Therefore each FS will need a fallback "inactive span" which + * will be reverted to on deactivation of any other span. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public interface AuditSpan extends Closeable { + + /** + * Return a span ID which must be unique for all spans within + * everywhere. That effectively means part of the + * span SHOULD be derived from a UUID. + * Callers MUST NOT make any assumptions about the actual + * contents or structure of this string other than the + * uniqueness. + * @return a non-empty string + */ + String getSpanId(); + + /** + * Get the name of the operation. + * @return the operation name. + */ + String getOperationName(); + + /** + * Timestamp in UTC of span creation. + * @return timestamp. + */ + long getTimestamp(); + + /** + * Make this span active in the current thread. + * @return the activated span. + * This is makes it easy to use in try with resources + */ + AuditSpan activate(); + + /** + * Deactivate the span in the current thread. 
+ */ + void deactivate(); + + /** + * Close calls {@link #deactivate()}; subclasses may override + * but the audit manager's wrapping span will always relay to + * {@link #deactivate()} rather + * than call this method on the wrapped span. + */ + default void close() { + deactivate(); + } + + /** + * Is the span valid? False == this is a span to indicate unbonded. + * @return true if this span represents a real operation. + */ + default boolean isValidSpan() { + return true; + } + + /** + * Set an attribute. + * This may or may not be propagated to audit logs. + * @param key attribute name + * @param value value + */ + default void set(String key, String value) { } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpanSource.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpanSource.java new file mode 100644 index 0000000000000..4f9f5a64564c0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditSpanSource.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.store.audit; + +import javax.annotation.Nullable; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A source of audit spans. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public interface AuditSpanSource { + + /** + * Create a span for an operation. + * + * All operation names SHOULD come from + * {@code StoreStatisticNames} or + * {@code StreamStatisticNames}. + * @param operation operation name. + * @param path1 first path of operation + * @param path2 second path of operation + * @return a span for the audit + * @throws IOException failure + */ + T createSpan(String operation, + @Nullable String path1, + @Nullable String path2) + throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditingFunctions.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditingFunctions.java new file mode 100644 index 0000000000000..acc82766be190 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/AuditingFunctions.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
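A sketch of how a client holding one of these span sources might wrap an operation, using the try-with-resources pattern the AuditSpan javadoc above describes; the operation name, paths and helper class are hypothetical:

import java.io.IOException;

import org.apache.hadoop.fs.store.audit.AuditSpan;
import org.apache.hadoop.fs.store.audit.AuditSpanSource;

/** Hypothetical helper which wraps an operation in an audit span. */
public final class SpanUsageExample {

  private final AuditSpanSource<AuditSpan> spanSource;

  public SpanUsageExample(AuditSpanSource<AuditSpan> spanSource) {
    this.spanSource = spanSource;
  }

  public void renameWithAudit(String src, String dest) throws IOException {
    // create a span; the operation name would normally come from StoreStatisticNames
    AuditSpan span = spanSource.createSpan("op_rename", src, dest);
    // activate() returns the span, so try-with-resources activates it here
    // and deactivates it via close() on exit
    try (AuditSpan active = span.activate()) {
      active.set("stage", "example"); // optional attribute; may not reach the logs
      doRename(src, dest);            // hypothetical inner operation
    }
  }

  private void doRename(String src, String dest) throws IOException {
    // the real work would run here, inside the active span
  }
}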
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.store.audit; + +import javax.annotation.Nullable; +import java.util.concurrent.Callable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.functional.CallableRaisingIOE; +import org.apache.hadoop.util.functional.FunctionRaisingIOE; +import org.apache.hadoop.util.functional.InvocationRaisingIOE; + +/** + * Static methods to assist in working with Audit Spans. + * the {@code withinX} calls take a span and a closure/function etc. + * and return a new function of the same types but which will + * activate and the span. + * They do not deactivate it afterwards to avoid accidentally deactivating + * the already-active span during a chain of operations in the same thread. + * All they do is ensure that the given span is guaranteed to be + * active when the passed in callable/function/invokable is evaluated. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class AuditingFunctions { + + private AuditingFunctions() { + } + + /** + * Given a callable, return a new callable which + * activates and deactivates the span around the inner invocation. + * @param auditSpan audit span + * @param operation operation + * @param type of result + * @return a new invocation. + */ + public static CallableRaisingIOE withinAuditSpan( + @Nullable AuditSpan auditSpan, + CallableRaisingIOE operation) { + return auditSpan == null + ? operation + : () -> { + auditSpan.activate(); + return operation.apply(); + }; + } + + /** + * Given an invocation, return a new invocation which + * activates and deactivates the span around the inner invocation. + * @param auditSpan audit span + * @param operation operation + * @return a new invocation. + */ + public static InvocationRaisingIOE withinAuditSpan( + @Nullable AuditSpan auditSpan, + InvocationRaisingIOE operation) { + return auditSpan == null + ? operation + : () -> { + auditSpan.activate(); + operation.apply(); + }; + } + + /** + * Given a function, return a new function which + * activates and deactivates the span around the inner one. + * @param auditSpan audit span + * @param operation operation + * @return a new invocation. + */ + public static FunctionRaisingIOE withinAuditSpan( + @Nullable AuditSpan auditSpan, + FunctionRaisingIOE operation) { + return auditSpan == null + ? operation + : (x) -> { + auditSpan.activate(); + return operation.apply(x); + }; + } + + /** + * Given a callable, return a new callable which + * activates and deactivates the span around the inner invocation. + * @param auditSpan audit span + * @param operation operation + * @param type of result + * @return a new invocation. + */ + public static Callable callableWithinAuditSpan( + @Nullable AuditSpan auditSpan, + Callable operation) { + return auditSpan == null + ? 
operation + : () -> { + auditSpan.activate(); + return operation.call(); + }; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/HttpReferrerAuditHeader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/HttpReferrerAuditHeader.java new file mode 100644 index 0000000000000..b2684e758892a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/HttpReferrerAuditHeader.java @@ -0,0 +1,503 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.store.audit; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.StringJoiner; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.audit.CommonAuditContext; +import org.apache.hadoop.fs.store.LogExactlyOnce; +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_ID; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_OP; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PATH; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PATH2; +import static org.apache.hadoop.fs.audit.AuditConstants.REFERRER_ORIGIN_HOST; + +/** + * Contains all the logic for generating an HTTP "Referer" + * entry; includes escaping query params. + * Tests for this are in + * {@code org.apache.hadoop.fs.s3a.audit.TestHttpReferrerAuditHeader} + * so as to verify that header generation in the S3A auditors, and + * S3 log parsing, all work. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class HttpReferrerAuditHeader { + + /** + * Format of path to build: {@value}. + * the params passed in are (context ID, span ID, op). + * Update + * {@code TestHttpReferrerAuditHeader.SAMPLE_LOG_ENTRY} on changes + */ + public static final String REFERRER_PATH_FORMAT = "/hadoop/1/%3$s/%2$s/"; + + private static final Logger LOG = + LoggerFactory.getLogger(HttpReferrerAuditHeader.class); + + /** + * Log for warning of problems creating headers will only log of + * a problem once per process instance. 
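// ---------------------------------------------------------------------
// Illustrative sketch only (commentary, not part of the patch): handing
// work to another thread while keeping it inside the span, using the
// AuditingFunctions helpers above. The 'pool', 'span' and doListing()
// helper are hypothetical; generic parameters (e.g. CallableRaisingIOE<T>)
// are assumed to have been elided by this rendering of the diff.
static Future<List<String>> submitListing(ExecutorService pool,
    AuditSpan span) {
  // callableWithinAuditSpan() re-activates the span in the worker thread
  // just before doListing() runs; it deliberately leaves it active after.
  return pool.submit(
      AuditingFunctions.callableWithinAuditSpan(span, () -> doListing()));
}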
+ * This is to avoid logs being flooded with errors. + */ + private static final LogExactlyOnce WARN_OF_URL_CREATION = + new LogExactlyOnce(LOG); + + /** Context ID. */ + private final String contextId; + + /** operation name. */ + private final String operationName; + + /** Span ID. */ + private final String spanId; + + /** optional first path. */ + private final String path1; + + /** optional second path. */ + private final String path2; + + /** + * The header as created in the constructor; used in toString(). + * A new header is built on demand in {@link #buildHttpReferrer()} + * so that evaluated attributes are dynamically evaluated + * in the correct thread/place. + */ + private final String initialHeader; + + /** + * Map of simple attributes. + */ + private final Map attributes; + + /** + * Parameters dynamically evaluated on the thread just before + * the request is made. + */ + private final Map> evaluated; + + /** + * Elements to filter from the final header. + */ + private final Set filter; + + /** + * Instantiate. + * + * Context and operationId are expected to be well formed + * numeric/hex strings, at least adequate to be + * used as individual path elements in a URL. + */ + private HttpReferrerAuditHeader( + final Builder builder) { + this.contextId = requireNonNull(builder.contextId); + this.evaluated = builder.evaluated; + this.filter = builder.filter; + this.operationName = requireNonNull(builder.operationName); + this.path1 = builder.path1; + this.path2 = builder.path2; + this.spanId = requireNonNull(builder.spanId); + + // copy the parameters from the builder and extend + attributes = builder.attributes; + + addAttribute(PARAM_OP, operationName); + addAttribute(PARAM_PATH, path1); + addAttribute(PARAM_PATH2, path2); + addAttribute(PARAM_ID, spanId); + + // patch in global context values where not set + Iterable> globalContextValues + = builder.globalContextValues; + if (globalContextValues != null) { + for (Map.Entry entry : globalContextValues) { + attributes.putIfAbsent(entry.getKey(), entry.getValue()); + } + } + + // build the referrer up. so as to find/report problems early + initialHeader = buildHttpReferrer(); + } + + /** + * Build the referrer string. + * This includes dynamically evaluating all of the evaluated + * attributes. + * If there is an error creating the string it will be logged once + * per entry, and "" returned. + * @return a referrer string or "" + */ + public String buildHttpReferrer() { + + String header; + try { + String queries; + // Update any params which are dynamically evaluated + evaluated.forEach((key, eval) -> + addAttribute(key, eval.get())); + // now build the query parameters from all attributes, static and + // evaluated, stripping out any from the filter + queries = attributes.entrySet().stream() + .filter(e -> !filter.contains(e.getKey())) + .map(e -> e.getKey() + "=" + e.getValue()) + .collect(Collectors.joining("&")); + final URI uri = new URI("https", REFERRER_ORIGIN_HOST, + String.format(Locale.ENGLISH, REFERRER_PATH_FORMAT, + contextId, spanId, operationName), + queries, + null); + header = uri.toASCIIString(); + } catch (URISyntaxException e) { + WARN_OF_URL_CREATION.warn("Failed to build URI for auditor: " + e, e); + header = ""; + } + return header; + } + + /** + * Add a query parameter if not null/empty + * There's no need to escape here as it is done in the URI + * constructor. 
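// ---------------------------------------------------------------------
// Worked example only (commentary, not part of the patch): a referrer
// built from operation "op_rename", span ID "span-0002", first path
// "/src" and second path "/dest" would look roughly like
//
//   https://audit.example.org/hadoop/1/op_rename/span-0002/
//       ?op=op_rename&p1=/src&p2=/dest&id=span-0002&pr=alice
//
// The origin host and the short query keys (op/p1/p2/id/pr) come from
// AuditConstants and are quoted here from memory rather than from this
// patch. The path encodes (operation, span ID) via REFERRER_PATH_FORMAT,
// and every attribute, whether static, evaluated or global, becomes a
// query parameter unless it is listed in the filter set.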
+ * @param key query key + * @param value query value + */ + private void addAttribute(String key, + String value) { + if (StringUtils.isNotEmpty(value)) { + attributes.put(key, value); + } + } + + /** + * Set an attribute. If the value is non-null/empty, + * it will be used as a query parameter. + * + * @param key key to set + * @param value value. + */ + public void set(final String key, final String value) { + addAttribute(requireNonNull(key), value); + } + + public String getContextId() { + return contextId; + } + + public String getOperationName() { + return operationName; + } + + public String getSpanId() { + return spanId; + } + + public String getPath1() { + return path1; + } + + public String getPath2() { + return path2; + } + + @Override + public String toString() { + return new StringJoiner(", ", + HttpReferrerAuditHeader.class.getSimpleName() + "[", "]") + .add(initialHeader) + .toString(); + } + + /** + * Perform any escaping to valid path elements in advance of + * new URI() doing this itself. Only path separators need to + * be escaped/converted at this point. + * @param source source string + * @return an escaped path element. + */ + public static String escapeToPathElement(CharSequence source) { + int len = source.length(); + StringBuilder r = new StringBuilder(len); + for (int i = 0; i < len; i++) { + char c = source.charAt(i); + String s = Character.toString(c); + switch (c) { + case '/': + case '@': + s = "+"; + break; + default: + break; + } + r.append(s); + } + return r.toString(); + + } + + /** + * Strip any quotes from around a header. + * This is needed when processing log entries. + * @param header field. + * @return field without quotes. + */ + public static String maybeStripWrappedQuotes(String header) { + String h = header; + // remove quotes if needed. + while (h.startsWith("\"")) { + h = h.substring(1); + } + while (h.endsWith("\"")) { + h = h.substring(0, h.length() - 1); + } + return h; + } + + /** + * Split up the string. Uses httpClient: make sure it is on the classpath. + * Any query param with a name but no value, e.g ?something is + * returned in the map with an empty string as the value. + * @param header URI to parse + * @return a map of parameters. + * @throws URISyntaxException failure to build URI from header. + */ + public static Map extractQueryParameters(String header) + throws URISyntaxException { + URI uri = new URI(maybeStripWrappedQuotes(header)); + // get the decoded query + List params = URLEncodedUtils.parse(uri, + StandardCharsets.UTF_8); + Map result = new HashMap<>(params.size()); + for (NameValuePair param : params) { + String name = param.getName(); + String value = param.getValue(); + if (value == null) { + value = ""; + } + result.put(name, value); + } + return result; + } + + /** + * Get a builder. + * @return a new builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder. + * + * Context and operationId are expected to be well formed + * numeric/hex strings, at least adequate to be + * used as individual path elements in a URL. + */ + public static final class Builder { + + /** Context ID. */ + private String contextId; + + /** operation name. */ + private String operationName; + + /** operation ID. */ + private String spanId; + + /** optional first path. */ + private String path1; + + /** optional second path. */ + private String path2; + + /** Map of attributes to add as query parameters. 
*/ + private final Map attributes = new HashMap<>(); + + /** + * Parameters dynamically evaluated on the thread just before + * the request is made. + */ + private final Map> evaluated = + new HashMap<>(); + + /** + * Global context values; defaults to that of + * {@link CommonAuditContext#getGlobalContextEntries()} and + * should not need to be changed. + */ + private Iterable> globalContextValues = + CommonAuditContext.getGlobalContextEntries(); + + /** + * Elements to filter from the final header. + */ + private Set filter = new HashSet<>(); + + private Builder() { + } + + /** + * Build. + * @return an HttpReferrerAuditHeader + */ + public HttpReferrerAuditHeader build() { + return new HttpReferrerAuditHeader(this); + } + + /** + * Set context ID. + * @param value context + * @return the builder + */ + public Builder withContextId(final String value) { + contextId = value; + return this; + } + + /** + * Set Operation name. + * @param value new value + * @return the builder + */ + public Builder withOperationName(final String value) { + operationName = value; + return this; + } + + /** + * Set ID. + * @param value new value + * @return the builder + */ + public Builder withSpanId(final String value) { + spanId = value; + return this; + } + + /** + * Set Path1 of operation. + * @param value new value + * @return the builder + */ + public Builder withPath1(final String value) { + path1 = value; + return this; + } + + /** + * Set Path2 of operation. + * @param value new value + * @return the builder + */ + public Builder withPath2(final String value) { + path2 = value; + return this; + } + + /** + * Add all attributes to the current map. + * @param value new value + * @return the builder + */ + public Builder withAttributes(final Map value) { + attributes.putAll(value); + return this; + } + + /** + * Add an attribute to the current map. + * Replaces any with the existing key. + * @param key key to set/update + * @param value new value + * @return the builder + */ + public Builder withAttribute(String key, String value) { + attributes.put(key, value); + return this; + } + + /** + * Add all evaluated attributes to the current map. + * @param value new value + * @return the builder + */ + public Builder withEvaluated(final Map> value) { + evaluated.putAll(value); + return this; + } + + /** + * Add an evaluated attribute to the current map. + * Replaces any with the existing key. + * Set evaluated methods. + * @param key key + * @param value new value + * @return the builder + */ + public Builder withEvaluated(String key, Supplier value) { + evaluated.put(key, value); + return this; + } + + /** + * Set the global context values (replaces the default binding + * to {@link CommonAuditContext#getGlobalContextEntries()}). + * @param value new value + * @return the builder + */ + public Builder withGlobalContextValues( + final Iterable> value) { + globalContextValues = value; + return this; + } + + /** + * Declare the fields to filter. + * @param fields iterable of field names. 
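// ---------------------------------------------------------------------
// Illustrative sketch only (commentary, not part of the patch): building
// a header for one operation with the builder defined here. The
// contextId/spanId/principal variables are hypothetical; global context
// entries are picked up automatically via the default
// globalContextValues binding.
HttpReferrerAuditHeader header = HttpReferrerAuditHeader.builder()
    .withContextId(contextId)            // e.g. a UUID per FS instance
    .withSpanId(spanId)                  // unique span ID
    .withOperationName("op_rename")
    .withPath1("/src")
    .withPath2("/dest")
    .withAttribute("pr", principal)      // static attribute (key assumed)
    .withEvaluated("ts",                 // re-evaluated on each build
        () -> Long.toString(System.currentTimeMillis()))
    .withFilter(Collections.singleton("p2"))  // strip p2 from the header
    .build();
String referrer = header.buildHttpReferrer();  // rebuilt per request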
+ * @return the builder + */ + public Builder withFilter(final Collection fields) { + this.filter = new HashSet<>(fields); + return this; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/package-info.java new file mode 100644 index 0000000000000..98fb5b59c3ac0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/audit/package-info.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Auditing classes for internal + * use within the hadoop-* modules only. No stability guarantees. + * The public/evolving API is in {@code org.apache.hadoop.fs.audit}. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.fs.store.audit; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java index 47aabed89cdf0..70e093e1e4870 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java @@ -36,6 +36,7 @@ import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.audit.CommonAuditContext; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.service.Service; import org.apache.hadoop.util.ExitCodeProvider; @@ -591,6 +592,7 @@ protected int coreServiceLaunch(Configuration conf, } String name = getServiceName(); LOG.debug("Launched service {}", name); + CommonAuditContext.noteEntryPoint(service); LaunchableService launchableService = null; if (service instanceof LaunchableService) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java index 8740be49d97bc..336700a6e276a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.audit.CommonAuditContext; 
import org.apache.hadoop.ipc.CallerContext; /** @@ -63,6 +64,10 @@ public static int run(Configuration conf, Tool tool, String[] args) CallerContext ctx = new CallerContext.Builder("CLI").build(); CallerContext.setCurrent(ctx); } + // Note the entry point in the audit context; this + // may be used in audit events set to cloud store logs + // or elsewhere. + CommonAuditContext.noteEntryPoint(tool); if(conf == null) { conf = new Configuration(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java index 3ac0fced1493d..5fdea4f5b747a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java @@ -189,6 +189,7 @@ public static RemoteIterator closingRemoteIterator( /** * Build a list from a RemoteIterator. + * @param source source iterator * @param type * @return a list of the values. * @throws IOException if the source RemoteIterator raises it. @@ -202,12 +203,17 @@ public static List toList(RemoteIterator source) /** * Build an array from a RemoteIterator. + * @param source source iterator + * @param a destination array; if too small a new array + * of the same type is created * @param type * @return an array of the values. * @throws IOException if the source RemoteIterator raises it. */ - public static T[] toArray(RemoteIterator source) throws IOException { - return (T[]) toList(source).toArray(); + public static T[] toArray(RemoteIterator source, + T[] a) throws IOException { + List list = toList(source); + return list.toArray(a); } /** @@ -240,18 +246,28 @@ public static long foreach( consumer.accept(source.next()); } - // maybe log the results - logIOStatisticsAtDebug(LOG, "RemoteIterator Statistics: {}", source); } finally { - if (source instanceof Closeable) { - // source is closeable, so close. - IOUtils.cleanupWithLogger(LOG, (Closeable) source); - } + cleanupRemoteIterator(source); } - return count; } + /** + * Clean up after an iteration. + * If the log is at debug, calculate and log the IOStatistics. + * If the iterator is closeable, cast and then cleanup the iterator + * @param source iterator source + * @param type of source + */ + public static void cleanupRemoteIterator(RemoteIterator source) { + // maybe log the results + logIOStatisticsAtDebug(LOG, "RemoteIterator Statistics: {}", source); + if (source instanceof Closeable) { + /* source is closeable, so close.*/ + IOUtils.cleanupWithLogger(LOG, (Closeable) source); + } + } + /** * A remote iterator from a singleton. It has a single next() * value, after which hasNext() returns false and next() fails. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/audit/TestCommonAuditContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/audit/TestCommonAuditContext.java new file mode 100644 index 0000000000000..798841a2d6905 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/audit/TestCommonAuditContext.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
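// ---------------------------------------------------------------------
// Illustrative sketch only (commentary, not part of the patch): the
// reworked RemoteIterators.toArray() follows the Collection.toArray(T[])
// convention and so avoids the unchecked cast of the old zero-argument
// variant. The fs/path values are hypothetical.
static FileStatus[] listAsArray(FileSystem fs, Path path)
    throws IOException {
  RemoteIterator<FileStatus> it = fs.listStatusIterator(path);
  // foreach() now calls cleanupRemoteIterator() on completion, which logs
  // IOStatistics at debug and closes the iterator when it is Closeable.
  return RemoteIterators.toArray(it, new FileStatus[0]);
}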
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.audit; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import org.assertj.core.api.AbstractStringAssert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_COMMAND; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PROCESS; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_THREAD1; +import static org.apache.hadoop.fs.audit.CommonAuditContext.PROCESS_ID; +import static org.apache.hadoop.fs.audit.CommonAuditContext.removeGlobalContextEntry; +import static org.apache.hadoop.fs.audit.CommonAuditContext.currentAuditContext; +import static org.apache.hadoop.fs.audit.CommonAuditContext.getGlobalContextEntry; +import static org.apache.hadoop.fs.audit.CommonAuditContext.getGlobalContextEntries; +import static org.apache.hadoop.fs.audit.CommonAuditContext.noteEntryPoint; +import static org.apache.hadoop.fs.audit.CommonAuditContext.setGlobalContextEntry; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests of the common audit context. + */ +public class TestCommonAuditContext extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestCommonAuditContext.class); + + private final CommonAuditContext context = currentAuditContext(); + /** + * We can set, get and enumerate global context values. + */ + @Test + public void testGlobalSetGetEnum() throws Throwable { + + String s = "command"; + setGlobalContextEntry(PARAM_COMMAND, s); + assertGlobalEntry(PARAM_COMMAND) + .isEqualTo(s); + // and the iterators. + List> list = StreamSupport + .stream(getGlobalContextEntries().spliterator(), + false) + .filter(e -> e.getKey().equals(PARAM_COMMAND)) + .collect(Collectors.toList()); + assertThat(list) + .hasSize(1) + .allMatch(e -> e.getValue().equals(s)); + } + + @Test + public void testVerifyProcessID() throws Throwable { + assertThat( + getGlobalContextEntry(PARAM_PROCESS)) + .describedAs("global context value of %s", PARAM_PROCESS) + .isEqualTo(PROCESS_ID); + } + + + @Test + public void testNullValue() throws Throwable { + assertThat(context.get(PARAM_PROCESS)) + .describedAs("Value of context element %s", PARAM_PROCESS) + .isNull(); + } + + @Test + public void testThreadId() throws Throwable { + String t1 = getContextValue(PARAM_THREAD1); + Long tid = Long.valueOf(t1); + assertThat(tid).describedAs("thread ID") + .isEqualTo(Thread.currentThread().getId()); + } + + /** + * Verify functions are dynamically evaluated. 
+ */ + @Test + public void testDynamicEval() throws Throwable { + context.reset(); + final AtomicBoolean ab = new AtomicBoolean(false); + context.put("key", () -> + Boolean.toString(ab.get())); + assertContextValue("key") + .isEqualTo("false"); + // update the reference and the next get call will + // pick up the new value. + ab.set(true); + assertContextValue("key") + .isEqualTo("true"); + } + + private String getContextValue(final String key) { + String val = context.get(key); + assertThat(val).isNotBlank(); + return val; + } + + /** + * Start an assertion on a context value. + * @param key key to look up + * @return an assert which can be extended call + */ + private AbstractStringAssert assertContextValue(final String key) { + String val = context.get(key); + return assertThat(val) + .describedAs("Value of context element %s", key) + .isNotBlank(); + } + + @Test + public void testNoteEntryPoint() throws Throwable { + setAndAssertEntryPoint(this).isEqualTo("TestCommonAuditContext"); + + } + + @Test + public void testNoteNullEntryPoint() throws Throwable { + setAndAssertEntryPoint(null).isNull(); + } + + private AbstractStringAssert setAndAssertEntryPoint(final Object tool) { + removeGlobalContextEntry(PARAM_COMMAND); + noteEntryPoint(tool); + AbstractStringAssert anAssert = assertGlobalEntry( + PARAM_COMMAND); + return anAssert; + } + + private AbstractStringAssert assertGlobalEntry(final String key) { + AbstractStringAssert anAssert = assertThat(getGlobalContextEntry(key)) + .describedAs("Global context value %s", key); + return anAssert; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java index 35193fa2dc712..e13a49ca10e70 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java @@ -399,9 +399,7 @@ public static boolean rm(FileSystem fileSystem, IOException { if (fileSystem != null) { rejectRootOperation(path, allowRootDelete); - if (fileSystem.exists(path)) { - return fileSystem.delete(path, recursive); - } + return fileSystem.delete(path, recursive); } return false; @@ -728,8 +726,10 @@ public static void assertDeleted(FileSystem fs, assertPathExists(fs, "about to be deleted file", file); } boolean deleted = fs.delete(file, recursive); - String dir = ls(fs, file.getParent()); - assertTrue("Delete failed on " + file + ": " + dir, deleted); + if (!deleted) { + String dir = ls(fs, file.getParent()); + assertTrue("Delete failed on " + file + ": " + dir, deleted); + } assertPathDoesNotExist(fs, "Deleted file", file); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/CredentialInitializationException.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/CredentialInitializationException.java index 46655bc99e6fe..2f0cfd37ad37c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/CredentialInitializationException.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/CredentialInitializationException.java @@ -20,12 +20,21 @@ import com.amazonaws.AmazonClientException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + /** * Exception which Hadoop's AWSCredentialsProvider implementations should * 
throw when there is a problem with the credential setup. This * is a subclass of {@link AmazonClientException} which sets * {@link #isRetryable()} to false, so as to fail fast. + * This is used in credential providers and elsewhere. + * When passed through {@code S3AUtils.translateException()} it + * is mapped to an AccessDeniedException. As a result, the Invoker + * code will automatically translate */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class CredentialInitializationException extends AmazonClientException { public CredentialInitializationException(String message, Throwable t) { super(message, t); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java index 19cd6c985b531..a851f0fbac687 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java @@ -34,6 +34,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.functional.CallableRaisingIOE; +import org.apache.hadoop.util.functional.InvocationRaisingIOE; /** * Class to provide lambda expression invocation of AWS operations. @@ -43,7 +44,8 @@ * the other {@code retry() and retryUntranslated()} calls are wrappers. * * The static {@link #once(String, String, CallableRaisingIOE)} and - * {@link #once(String, String, VoidOperation)} calls take an operation and + * {@link #once(String, String, InvocationRaisingIOE)} calls take an + * operation and * return it with AWS exceptions translated to IOEs of some form. * * The retry logic on a failure is defined by the retry policy passed in @@ -57,7 +59,7 @@ * but before the sleep. * These callbacks can be used for reporting and incrementing statistics. * - * The static {@link #quietly(String, String, VoidOperation)} and + * The static {@link #quietly(String, String, InvocationRaisingIOE)} and * {@link #quietlyEval(String, String, CallableRaisingIOE)} calls exist to * take any operation and quietly catch and log at debug. 
* The return value of {@link #quietlyEval(String, String, CallableRaisingIOE)} @@ -126,11 +128,11 @@ public static T once(String action, String path, * @throws IOException any IOE raised, or translated exception */ @Retries.OnceTranslated - public static void once(String action, String path, VoidOperation operation) - throws IOException { + public static void once(String action, String path, + InvocationRaisingIOE operation) throws IOException { once(action, path, () -> { - operation.execute(); + operation.apply(); return null; }); } @@ -171,10 +173,10 @@ public static void ignoreIOExceptions( Logger log, String action, String path, - VoidOperation operation) { + InvocationRaisingIOE operation) { ignoreIOExceptions(log, action, path, () -> { - operation.execute(); + operation.apply(); return null; }); } @@ -194,11 +196,11 @@ public void retry(String action, String path, boolean idempotent, Retried retrying, - VoidOperation operation) + InvocationRaisingIOE operation) throws IOException { retry(action, path, idempotent, retrying, () -> { - operation.execute(); + operation.apply(); return null; }); } @@ -221,11 +223,11 @@ public void maybeRetry(boolean doRetry, String path, boolean idempotent, Retried retrying, - VoidOperation operation) + InvocationRaisingIOE operation) throws IOException { maybeRetry(doRetry, action, path, idempotent, retrying, () -> { - operation.execute(); + operation.apply(); return null; }); } @@ -243,7 +245,7 @@ public void maybeRetry(boolean doRetry, public void retry(String action, String path, boolean idempotent, - VoidOperation operation) + InvocationRaisingIOE operation) throws IOException { retry(action, path, idempotent, retryCallback, operation); } @@ -265,7 +267,7 @@ public void maybeRetry( String action, String path, boolean idempotent, - VoidOperation operation) + InvocationRaisingIOE operation) throws IOException { maybeRetry(doRetry, action, path, idempotent, retryCallback, operation); } @@ -475,7 +477,7 @@ public T retryUntranslated( */ public static void quietly(String action, String path, - VoidOperation operation) { + InvocationRaisingIOE operation) { try { once(action, path, operation); } catch (Exception e) { @@ -515,14 +517,6 @@ private static String toDescription(String action, @Nullable String path) { (StringUtils.isNotEmpty(path) ? (" on " + path) : ""); } - /** - * Void operation which may raise an IOException. - */ - @FunctionalInterface - public interface VoidOperation { - void execute() throws IOException; - } - /** * Callback for retry and notification operations. 
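// ---------------------------------------------------------------------
// Illustrative sketch only (commentary, not part of the patch): call
// sites are unchanged at the source level; a void lambda still compiles,
// it is simply typed as InvocationRaisingIOE (single method apply())
// rather than the removed Invoker.VoidOperation. The invoker, s3 client,
// bucket, key and path values are hypothetical.
invoker.retry("delete", path.toString(), true,
    () -> s3.deleteObject(bucket, key));     // InvocationRaisingIOE lambda
Invoker.once("finish write", key,
    () -> completeWrite(key));               // hypothetical helper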
* Even if the interface is throwing up "raw" exceptions, this handler diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java index 3cb3d5d832df3..113e6f4de2abd 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java @@ -22,6 +22,7 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.services.s3.model.S3ObjectSummary; + import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.tuple.Triple; @@ -41,10 +42,12 @@ import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.util.functional.RemoteIterators; import org.slf4j.Logger; +import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.time.Instant; @@ -79,6 +82,9 @@ /** * Place for the S3A listing classes; keeps all the small classes under control. + * + * Spans passed in are attached to the listing iterators returned, but are not + * closed at the end of the iteration. This is because the same span */ @InterfaceAudience.Private public class Listing extends AbstractStoreOperation { @@ -137,16 +143,19 @@ public static RemoteIterator toProvidedFileStatusIterator( * @param filter the filter on which paths to accept * @param acceptor the class/predicate to decide which entries to accept * in the listing based on the full file status. + * @param span audit span for this iterator * @return the iterator * @throws IOException IO Problems */ + @Retries.RetryRaw public FileStatusListingIterator createFileStatusListingIterator( Path listPath, S3ListRequest request, PathFilter filter, - Listing.FileStatusAcceptor acceptor) throws IOException { + Listing.FileStatusAcceptor acceptor, + AuditSpan span) throws IOException { return createFileStatusListingIterator(listPath, request, filter, acceptor, - null); + null, span); } /** @@ -159,6 +168,7 @@ public FileStatusListingIterator createFileStatusListingIterator( * in the listing based on the full file status. * @param providedStatus the provided list of file status, which may contain * items that are not listed from source. + * @param span audit span for this iterator * @return the iterator * @throws IOException IO Problems */ @@ -168,9 +178,10 @@ public FileStatusListingIterator createFileStatusListingIterator( S3ListRequest request, PathFilter filter, Listing.FileStatusAcceptor acceptor, - RemoteIterator providedStatus) throws IOException { + RemoteIterator providedStatus, + AuditSpan span) throws IOException { return new FileStatusListingIterator( - createObjectListingIterator(listPath, request), + createObjectListingIterator(listPath, request, span), filter, acceptor, providedStatus); @@ -181,14 +192,16 @@ public FileStatusListingIterator createFileStatusListingIterator( * list object request. 
* @param listPath path of the listing * @param request initial request to make + * @param span audit span for this iterator * @return the iterator * @throws IOException IO Problems */ @Retries.RetryRaw - public ObjectListingIterator createObjectListingIterator( + private ObjectListingIterator createObjectListingIterator( final Path listPath, - final S3ListRequest request) throws IOException { - return new ObjectListingIterator(listPath, request); + final S3ListRequest request, + final AuditSpan span) throws IOException { + return new ObjectListingIterator(listPath, request, span); } /** @@ -245,6 +258,7 @@ public RemoteIterator createSingleStatusIterator( * @param forceNonAuthoritativeMS forces metadata store to act like non * authoritative. This is useful when * listFiles output is used by import tool. + * @param span audit span for this iterator * @return an iterator over listing. * @throws IOException any exception. */ @@ -252,7 +266,8 @@ public RemoteIterator getListFilesAssumingDir( Path path, boolean recursive, Listing.FileStatusAcceptor acceptor, boolean collectTombstones, - boolean forceNonAuthoritativeMS) throws IOException { + boolean forceNonAuthoritativeMS, + AuditSpan span) throws IOException { String key = maybeAddTrailingSlash(pathToKey(path)); String delimiter = recursive ? null : "/"; @@ -325,10 +340,13 @@ public RemoteIterator getListFilesAssumingDir( createLocatedFileStatusIterator( createFileStatusListingIterator(path, listingOperationCallbacks - .createListObjectsRequest(key, delimiter), + .createListObjectsRequest(key, + delimiter, + span), ACCEPT_ALL, acceptor, - cachedFilesIterator)), + cachedFilesIterator, + span)), collectTombstones ? tombstones : null); } @@ -337,11 +355,13 @@ public RemoteIterator getListFilesAssumingDir( * Also performing tombstone reconciliation for guarded directories. * @param dir directory to check. * @param filter a path filter. + * @param span audit span for this iterator * @return an iterator that traverses statuses of the given dir. * @throws IOException in case of failure. */ public RemoteIterator getLocatedFileStatusIteratorForDir( - Path dir, PathFilter filter) throws IOException { + Path dir, PathFilter filter, AuditSpan span) throws IOException { + span.activate(); final String key = maybeAddTrailingSlash(pathToKey(dir)); final Listing.FileStatusAcceptor acceptor = new Listing.AcceptAllButSelfAndS3nDirs(dir); @@ -353,39 +373,55 @@ public RemoteIterator getLocatedFileStatusIteratorForDir( listingOperationCallbacks .getUpdatedTtlTimeProvider(), allowAuthoritative); - Set tombstones = meta != null - ? meta.listTombstones() - : null; - final RemoteIterator cachedFileStatusIterator = - createProvidedFileStatusIterator( - S3Guard.dirMetaToStatuses(meta), filter, acceptor); - return (allowAuthoritative && meta != null - && meta.isAuthoritative()) - ? createLocatedFileStatusIterator( - cachedFileStatusIterator) - : createTombstoneReconcilingIterator( + if (meta != null) { + // there's metadata + // convert to an iterator + final RemoteIterator cachedFileStatusIterator = + createProvidedFileStatusIterator( + S3Guard.dirMetaToStatuses(meta), filter, acceptor); + + // if the dir is authoritative and the data considers itself + // to be authorititative. 
+ if (allowAuthoritative && meta.isAuthoritative()) { + // return the list + return createLocatedFileStatusIterator(cachedFileStatusIterator); + } else { + // merge the datasets + return createTombstoneReconcilingIterator( createLocatedFileStatusIterator( - createFileStatusListingIterator(dir, - listingOperationCallbacks - .createListObjectsRequest(key, "/"), - filter, - acceptor, - cachedFileStatusIterator)), - tombstones); + createFileStatusListingIterator(dir, + listingOperationCallbacks + .createListObjectsRequest(key, "/", span), + filter, + acceptor, + cachedFileStatusIterator, + span)), + meta.listTombstones()); + } + } else { + // Unguarded + return createLocatedFileStatusIterator( + createFileStatusListingIterator(dir, + listingOperationCallbacks + .createListObjectsRequest(key, "/", span), + filter, + acceptor, + span)); + } } /** * Calculate list of file statuses assuming path * to be a non-empty directory. * @param path input path. + * @param span audit span for this iterator * @return Triple of file statuses, metaData, auth flag. * @throws IOException Any IO problems. */ public Triple, DirListingMetadata, Boolean> - getFileStatusesAssumingNonEmptyDir(Path path) + getFileStatusesAssumingNonEmptyDir(Path path, final AuditSpan span) throws IOException { String key = pathToKey(path); - List result; if (!key.isEmpty()) { key = key + '/'; } @@ -408,14 +444,15 @@ public RemoteIterator getLocatedFileStatusIteratorForDir( dirMeta, Boolean.TRUE); } - S3ListRequest request = createListObjectsRequest(key, "/"); + S3ListRequest request = createListObjectsRequest(key, "/", span); LOG.debug("listStatus: doing listObjects for directory {}", key); FileStatusListingIterator filesItr = createFileStatusListingIterator( path, request, ACCEPT_ALL, - new Listing.AcceptAllButSelfAndS3nDirs(path)); + new Listing.AcceptAllButSelfAndS3nDirs(path), + span); // return the results obtained from s3. return Triple.of( @@ -424,8 +461,11 @@ public RemoteIterator getLocatedFileStatusIteratorForDir( Boolean.FALSE); } - public S3ListRequest createListObjectsRequest(String key, String delimiter) { - return listingOperationCallbacks.createListObjectsRequest(key, delimiter); + public S3ListRequest createListObjectsRequest(String key, + String delimiter, + final AuditSpan span) { + return listingOperationCallbacks.createListObjectsRequest(key, delimiter, + span); } /** @@ -730,11 +770,13 @@ public String toString() { * Thread safety: none. */ class ObjectListingIterator implements RemoteIterator, - IOStatisticsSource { + IOStatisticsSource, Closeable { /** The path listed. */ private final Path listPath; + private final AuditSpan span; + /** The most recent listing results. */ private S3ListResult objects; @@ -772,12 +814,14 @@ class ObjectListingIterator implements RemoteIterator, * initial set of results/fail if there was a problem talking to the bucket. * @param listPath path of the listing * @param request initial request to make + * @param span audit span for this iterator. * @throws IOException if listObjects raises one. 
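// ---------------------------------------------------------------------
// Illustrative sketch only (commentary, not part of the patch): the
// filesystem-level caller now threads its audit span into the listing.
// The 'listing', 'span', 'dir' and ACCEPT_ALL values are assumed to be in
// scope, and the element type of the iterator is assumed (generics have
// been elided in this rendering of the diff).
RemoteIterator<S3ALocatedFileStatus> iterator =
    listing.getLocatedFileStatusIteratorForDir(dir, ACCEPT_ALL, span);
// The span is attached to the iterator and reused for subsequent page
// requests, but it is not closed when the iteration ends.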
*/ @Retries.RetryRaw ObjectListingIterator( Path listPath, - S3ListRequest request) throws IOException { + S3ListRequest request, + AuditSpan span) throws IOException { this.listPath = listPath; this.maxKeys = listingOperationCallbacks.getMaxKeys(); this.request = request; @@ -786,8 +830,9 @@ class ObjectListingIterator implements RemoteIterator, .withDurationTracking(OBJECT_LIST_REQUEST) .withDurationTracking(OBJECT_CONTINUE_LIST_REQUEST) .build(); + this.span = span; this.s3ListResultFuture = listingOperationCallbacks - .listObjectsAsync(request, iostats); + .listObjectsAsync(request, iostats, span); } /** @@ -851,7 +896,7 @@ private void fetchNextBatchAsyncIfPresent() throws IOException { LOG.debug("[{}], Requesting next {} objects under {}", listingCount, maxKeys, listPath); s3ListResultFuture = listingOperationCallbacks - .continueListObjectsAsync(request, objects, iostats); + .continueListObjectsAsync(request, objects, iostats, span); } } @@ -883,6 +928,14 @@ public Path getListPath() { public int getListingCount() { return listingCount; } + + /** + * Close, if actually called, will close the span + * this listing was created with. + */ + @Override + public void close() { + } } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/MultipartUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/MultipartUtils.java index 6eb490f2df482..d8c820cd8a121 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/MultipartUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/MultipartUtils.java @@ -31,11 +31,20 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.store.audit.AuditSpan; + +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_LIST; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfOperation; /** * MultipartUtils upload-specific functions for use by S3AFileSystem and Hadoop * CLI. + * The Audit span active when + * {@link #listMultipartUploads(StoreContext, AmazonS3, String, int)} + * was invoked is retained for all subsequent operations. */ public final class MultipartUtils { @@ -48,33 +57,47 @@ private MultipartUtils() { } /** * List outstanding multipart uploads. * Package private: S3AFileSystem and tests are the users of this. + * + * @param storeContext store context * @param s3 AmazonS3 client to use. - * @param bucketName name of S3 bucket to use. - * @param maxKeys maximum batch size to request at a time from S3. * @param prefix optional key prefix to narrow search. If null then whole * bucket will be searched. + * @param maxKeys maximum batch size to request at a time from S3. * @return an iterator of matching uploads */ - static MultipartUtils.UploadIterator listMultipartUploads(AmazonS3 s3, - Invoker invoker, String bucketName, int maxKeys, @Nullable String prefix) + static MultipartUtils.UploadIterator listMultipartUploads( + final StoreContext storeContext, + AmazonS3 s3, + @Nullable String prefix, + int maxKeys) throws IOException { - return new MultipartUtils.UploadIterator(s3, invoker, bucketName, maxKeys, + return new MultipartUtils.UploadIterator(storeContext, + s3, + maxKeys, prefix); } /** * Simple RemoteIterator wrapper for AWS `listMultipartUpload` API. * Iterates over batches of multipart upload metadata listings. 
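// ---------------------------------------------------------------------
// Illustrative sketch only (commentary, not part of the patch):
// enumerating pending uploads under a prefix. The storeContext, s3 client,
// LOG and prefix values are hypothetical, and listMultipartUploads() is
// package-private, so a real caller would sit alongside MultipartUtils.
// Whichever span is active in the StoreContext at call time is
// re-activated for every subsequent page request.
static void logPendingUploads(StoreContext storeContext, AmazonS3 s3)
    throws IOException {
  MultipartUtils.UploadIterator uploads =
      MultipartUtils.listMultipartUploads(storeContext, s3, "job-output/", 100);
  while (uploads.hasNext()) {
    MultipartUpload upload = uploads.next();
    LOG.info("pending upload {} started {}",
        upload.getKey(), upload.getInitiated());
  }
}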
+ * All requests are in the StoreContext's active span + * at the time the iterator was constructed. */ static class ListingIterator implements RemoteIterator { - private final String bucketName; private final String prefix; + + private final RequestFactory requestFactory; + private final int maxKeys; private final AmazonS3 s3; private final Invoker invoker; + private final AuditSpan auditSpan; + + private final StoreContext storeContext; + /** * Most recent listing results. */ @@ -85,16 +108,24 @@ static class ListingIterator implements */ private boolean firstListing = true; - private int listCount = 1; + /** + * Count of list calls made. + */ + private int listCount = 0; - ListingIterator(AmazonS3 s3, Invoker invoker, String bucketName, - int maxKeys, @Nullable String prefix) throws IOException { + ListingIterator(final StoreContext storeContext, + AmazonS3 s3, + @Nullable String prefix, + int maxKeys) throws IOException { + this.storeContext = storeContext; this.s3 = s3; - this.bucketName = bucketName; + this.requestFactory = storeContext.getRequestFactory(); this.maxKeys = maxKeys; this.prefix = prefix; - this.invoker = invoker; + this.invoker = storeContext.getInvoker(); + this.auditSpan = storeContext.getActiveAuditSpan(); + // request the first listing. requestNextBatch(); } @@ -138,31 +169,36 @@ public MultipartUploadListing next() throws IOException { @Override public String toString() { - return "Upload iterator: prefix " + prefix + "; list count " + - listCount + "; isTruncated=" + listing.isTruncated(); + return "Upload iterator: prefix " + prefix + + "; list count " + listCount + + "; upload count " + listing.getMultipartUploads().size() + + "; isTruncated=" + listing.isTruncated(); } @Retries.RetryTranslated private void requestNextBatch() throws IOException { - ListMultipartUploadsRequest req = - new ListMultipartUploadsRequest(bucketName); - if (prefix != null) { - req.setPrefix(prefix); - } - if (!firstListing) { - req.setKeyMarker(listing.getNextKeyMarker()); - req.setUploadIdMarker(listing.getNextUploadIdMarker()); - } - req.setMaxUploads(listCount); + try (AuditSpan span = auditSpan.activate()) { + ListMultipartUploadsRequest req = requestFactory + .newListMultipartUploadsRequest(prefix); + if (!firstListing) { + req.setKeyMarker(listing.getNextKeyMarker()); + req.setUploadIdMarker(listing.getNextUploadIdMarker()); + } + req.setMaxUploads(maxKeys); - LOG.debug("[{}], Requesting next {} uploads prefix {}, " + - "next key {}, next upload id {}", listCount, maxKeys, prefix, - req.getKeyMarker(), req.getUploadIdMarker()); - listCount++; + LOG.debug("[{}], Requesting next {} uploads prefix {}, " + + "next key {}, next upload id {}", listCount, maxKeys, prefix, + req.getKeyMarker(), req.getUploadIdMarker()); + listCount++; - listing = invoker.retry("listMultipartUploads", prefix, true, - () -> s3.listMultipartUploads(req)); - LOG.debug("New listing state: {}", this); + listing = invoker.retry("listMultipartUploads", prefix, true, + trackDurationOfOperation(storeContext.getInstrumentation(), + MULTIPART_UPLOAD_LIST.getSymbol(), + () -> s3.listMultipartUploads(req))); + LOG.debug("Listing found {} upload(s)", + listing.getMultipartUploads().size()); + LOG.debug("New listing state: {}", this); + } } } @@ -174,6 +210,10 @@ private void requestNextBatch() throws IOException { public static class UploadIterator implements RemoteIterator { + /** + * Iterator for issuing new upload list requests from + * where the previous one ended. 
+ */ private ListingIterator lister; /** Current listing: the last upload listing we fetched. */ private MultipartUploadListing listing; @@ -181,11 +221,15 @@ public static class UploadIterator private ListIterator batchIterator; @Retries.RetryTranslated - public UploadIterator(AmazonS3 s3, Invoker invoker, String bucketName, - int maxKeys, @Nullable String prefix) + public UploadIterator( + final StoreContext storeContext, + AmazonS3 s3, + int maxKeys, + @Nullable String prefix) throws IOException { - lister = new ListingIterator(s3, invoker, bucketName, maxKeys, prefix); + lister = new ListingIterator(storeContext, s3, prefix, + maxKeys); requestNextBatch(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java index 65b9535ba65ea..5ba39aae2e29a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java @@ -31,6 +31,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.amazonaws.SdkBaseException; import com.amazonaws.event.ProgressEvent; import com.amazonaws.event.ProgressEventType; import com.amazonaws.event.ProgressListener; @@ -55,12 +56,12 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.s3a.commit.CommitConstants; import org.apache.hadoop.fs.s3a.commit.PutTracker; -import org.apache.hadoop.fs.s3a.impl.LogExactlyOnce; import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.fs.statistics.DurationTracker; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.IOStatisticsLogging; import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.store.LogExactlyOnce; import org.apache.hadoop.util.Progressable; import static java.util.Objects.requireNonNull; @@ -134,6 +135,8 @@ class S3ABlockOutputStream extends OutputStream implements /** * Write operation helper; encapsulation of the filesystem operations. + * This contains the audit span for the operation, and activates/deactivates + * it within calls. */ private final WriteOperations writeOperationHelper; @@ -393,6 +396,7 @@ public void close() throws IOException { final List partETags = multiPartUpload.waitForAllPartUploads(); bytes = bytesSubmitted; + // then complete the operation if (putTracker.aboutToComplete(multiPartUpload.getUploadId(), partETags, @@ -777,6 +781,12 @@ private void uploadBlockAsync(final S3ADataBlocks.DataBlock block) uploadData.getUploadStream(), uploadData.getFile(), 0L); + } catch (SdkBaseException aws) { + // catch and translate + IOException e = translateException("upload", key, aws); + // failure to start the upload. + noteUploadFailure(e); + throw e; } catch (IOException e) { // failure to start the upload. 
noteUploadFailure(e); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 751038bb6d3c3..439d52edc14f5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -21,7 +21,6 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.InterruptedIOException; import java.net.URI; import java.nio.file.AccessDeniedException; @@ -53,12 +52,12 @@ import com.amazonaws.AmazonServiceException; import com.amazonaws.SdkBaseException; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.CopyObjectRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.DeleteObjectsResult; import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; @@ -69,8 +68,7 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; -import com.amazonaws.services.s3.model.SSECustomerKey; +import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.services.s3.transfer.Copy; @@ -80,6 +78,9 @@ import com.amazonaws.services.s3.transfer.model.CopyResult; import com.amazonaws.services.s3.transfer.model.UploadResult; import com.amazonaws.event.ProgressListener; + +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.store.audit.ActiveThreadSpanSource; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; @@ -91,11 +92,13 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonPathCapabilities; +import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Globber; import org.apache.hadoop.fs.impl.OpenFileParameters; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.s3a.auth.SignerManager; import org.apache.hadoop.fs.s3a.auth.delegation.DelegationOperations; import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider; @@ -106,12 +109,15 @@ import org.apache.hadoop.fs.s3a.impl.DeleteOperation; import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy; import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl; +import org.apache.hadoop.fs.s3a.impl.GetContentSummaryOperation; import org.apache.hadoop.fs.s3a.impl.HeaderProcessing; import org.apache.hadoop.fs.s3a.impl.InternalConstants; import 
org.apache.hadoop.fs.s3a.impl.ListingOperationCallbacks; +import org.apache.hadoop.fs.s3a.impl.MkdirOperation; import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport; import org.apache.hadoop.fs.s3a.impl.OperationCallbacks; import org.apache.hadoop.fs.s3a.impl.RenameOperation; +import org.apache.hadoop.fs.s3a.impl.RequestFactoryImpl; import org.apache.hadoop.fs.s3a.impl.S3AMultipartUploaderBuilder; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.impl.StoreContext; @@ -123,9 +129,14 @@ import org.apache.hadoop.fs.statistics.DurationTracker; import org.apache.hadoop.fs.statistics.DurationTrackerFactory; import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsLogging; import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.store.audit.AuditEntryPoint; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.DurationInfo; @@ -144,9 +155,12 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.AuditManagerS3A; +import org.apache.hadoop.fs.s3a.audit.AuditIntegration; +import org.apache.hadoop.fs.s3a.audit.OperationAuditor; import org.apache.hadoop.fs.s3a.auth.RoleModel; import org.apache.hadoop.fs.s3a.auth.delegation.AWSPolicyProvider; -import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecretOperations; import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; import org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens; import org.apache.hadoop.fs.s3a.auth.delegation.AbstractS3ATokenIdentifier; @@ -175,6 +189,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.functional.CallableRaisingIOE; import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_LOGGING_LEVEL; @@ -186,7 +201,7 @@ import static org.apache.hadoop.fs.s3a.Listing.toLocatedFileStatusIterator; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.*; -import static org.apache.commons.lang3.StringUtils.isNotEmpty; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.INITIALIZE_SPAN; import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_SSE_KMS_RW; import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3Operations; import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.TokenIssuingPolicy.NoTokensAvailable; @@ -197,7 +212,10 @@ import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletionIgnoringExceptions; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DELETE_CONSIDERED_IDEMPOTENT; import 
static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.UPLOAD_PART_COUNT_LIMIT; import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.logDnsLookup; import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.dirMetaToStatuses; @@ -205,8 +223,10 @@ import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST; import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.pairedTrackerFactory; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfInvocation; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfOperation; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfSupplier; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; import static org.apache.hadoop.util.functional.RemoteIterators.typeCastingRemoteIterator; @@ -226,22 +246,14 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class S3AFileSystem extends FileSystem implements StreamCapabilities, - AWSPolicyProvider, DelegationTokenProvider, IOStatisticsSource { + AWSPolicyProvider, DelegationTokenProvider, IOStatisticsSource, + AuditSpanSource, ActiveThreadSpanSource { + /** * Default blocksize as used in blocksize and FS status queries. */ public static final int DEFAULT_BLOCKSIZE = 32 * 1024 * 1024; - /** - * This declared delete as idempotent. - * This is an "interesting" topic in past Hadoop FS work. - * Essentially: with a single caller, DELETE is idempotent - * but in a shared filesystem, it is is very much not so. - * Here, on the basis that isn't a filesystem with consistency guarantees, - * retryable results in files being deleted. - */ - public static final boolean DELETE_CONSIDERED_IDEMPOTENT = true; - private URI uri; private Path workingDir; private String username; @@ -304,7 +316,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private S3ADataBlocks.BlockFactory blockFactory; private int blockOutputActiveBlocks; private WriteOperationHelper writeHelper; - private SelectBinding selectBinding; private boolean useListV1; private MagicCommitIntegration committerIntegration; @@ -318,12 +329,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, */ private int pageSize; - /** - * Specific operations used by rename and delete operations. - */ - private final S3AFileSystem.OperationCallbacksImpl - operationCallbacks = new OperationCallbacksImpl(); - private final ListingOperationCallbacks listingOperationCallbacks = new ListingOperationCallbacksImpl(); /** @@ -332,14 +337,24 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private DirectoryPolicy directoryPolicy; /** - * Header processing for XAttr. + * Context accessors for re-use. */ - private HeaderProcessing headerProcessing; + private final ContextAccessors contextAccessors = new ContextAccessorsImpl(); /** - * Context accessors for re-use. + * Factory for AWS requests. */ - private final ContextAccessors contextAccessors = new ContextAccessorsImpl(); + private RequestFactory requestFactory; + + /** + * Audit manager (service lifecycle). 
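+ * Requests to S3 pass through the handlers it supplies to the AWS client (see bindAWSClient()).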
+ * Creates the audit service and manages the binding of different audit spans + * to different threads. + * Initially this is a no-op manager; once the service is initialized it will + * be replaced with a configured one. + */ + private AuditManagerS3A auditManager = + AuditIntegration.stubAuditManager(); /** Add any deprecated keys. */ @SuppressWarnings("deprecation") @@ -371,6 +386,7 @@ public void initialize(URI name, Configuration originalConf) throws IOException { // get the host; this is guaranteed to be non-null, non-empty bucket = name.getHost(); + AuditSpan span = null; try { LOG.debug("Initializing S3AFileSystem for {}", bucket); // clone the configuration into one with propagated bucket options @@ -411,8 +427,6 @@ public void initialize(URI name, Configuration originalConf) s3guardInvoker = new Invoker(new S3GuardExistsRetryPolicy(getConf()), onRetry); - writeHelper = new WriteOperationHelper(this, getConf(), - statisticsContext); failOnMetadataWriteError = conf.getBoolean(FAIL_ON_METADATA_WRITE_ERROR, FAIL_ON_METADATA_WRITE_ERROR_DEFAULT); @@ -442,6 +456,21 @@ public void initialize(URI name, Configuration originalConf) signerManager = new SignerManager(bucket, this, conf, owner); signerManager.initCustomSigners(); + // start auditing + initializeAuditService(); + + // create the requestFactory. + // requires the audit manager to be initialized. + requestFactory = createRequestFactory(); + + // create the static write operation helper. + // this doesn't have a short-lived span; auditors which + // require one may reject usages. + writeHelper = createWriteOperationHelper(getActiveAuditSpan()); + + // create an initial span for all other operations. + span = createSpan(INITIALIZE_SPAN, bucket, null); + // creates the AWS client, including overriding auth chain if // the FS came with a DT // this may do some patching of the configuration (e.g. setting @@ -467,11 +496,6 @@ public void initialize(URI name, Configuration originalConf) magicCommitterEnabled ? "is" : "is not"); committerIntegration = new MagicCommitIntegration( this, magicCommitterEnabled); - // header processing for rename and magic committer - headerProcessing = new HeaderProcessing(createStoreContext()); - - // instantiate S3 Select support - selectBinding = new SelectBinding(writeHelper); boolean blockUploadEnabled = conf.getBoolean(FAST_UPLOAD, true); @@ -519,10 +543,12 @@ public void initialize(URI name, Configuration originalConf) listing = new Listing(listingOperationCallbacks, createStoreContext()); } catch (AmazonClientException e) { // amazon client exception: stop all services then throw the translation + cleanupWithLogger(LOG, span); stopAllServices(); throw translateException("initializing ", new Path(name), e); } catch (IOException | RuntimeException e) { // other exceptions: stop the services. + cleanupWithLogger(LOG, span); stopAllServices(); throw e; } @@ -602,6 +628,7 @@ public Statistics getInstanceStatistics() { * @param conf configuration. 
*/ private void initThreadPools(Configuration conf) { + final String name = "s3a-transfer-" + getBucket(); int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS); if (maxThreads < 2) { LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2."); @@ -615,13 +642,13 @@ private void initThreadPools(Configuration conf) { maxThreads, maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, - "s3a-transfer-shared"); + name + "-bounded"); unboundedThreadPool = new ThreadPoolExecutor( maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), BlockingThreadPoolExecutorService.newDaemonThreadFactory( - "s3a-transfer-unbounded")); + name + "-unbounded")); unboundedThreadPool.allowCoreThreadTimeOut(true); executorCapacity = intOption(conf, EXECUTOR_CAPACITY, DEFAULT_EXECUTOR_CAPACITY, 1); @@ -651,7 +678,9 @@ protected static S3AStorageStatistics createStorageStatistics( protected void verifyBucketExists() throws UnknownStoreException, IOException { if (!invoker.retry("doesBucketExist", bucket, true, - () -> s3.doesBucketExist(bucket))) { + trackDurationOfOperation(getDurationTrackerFactory(), + STORE_EXISTS_PROBE.getSymbol(), + () -> s3.doesBucketExist(bucket)))) { throw new UnknownStoreException("Bucket " + bucket + " does not exist"); } } @@ -667,7 +696,9 @@ protected void verifyBucketExists() protected void verifyBucketExistsV2() throws UnknownStoreException, IOException { if (!invoker.retry("doesBucketExistV2", bucket, true, - () -> s3.doesBucketExistV2(bucket))) { + trackDurationOfOperation(getDurationTrackerFactory(), + STORE_EXISTS_PROBE.getSymbol(), + () -> s3.doesBucketExistV2(bucket)))) { throw new UnknownStoreException("Bucket " + bucket + " does not exist"); } } @@ -750,13 +781,117 @@ private void bindAWSClient(URI name, boolean dtEnabled) throws IOException { .withEndpoint(conf.getTrimmed(ENDPOINT, DEFAULT_ENDPOINT)) .withMetrics(statisticsContext.newStatisticsFromAwsSdk()) .withPathStyleAccess(conf.getBoolean(PATH_STYLE_ACCESS, false)) - .withUserAgentSuffix(uaSuffix); + .withUserAgentSuffix(uaSuffix) + .withRequestHandlers(auditManager.createRequestHandlers()); s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf) .createS3Client(getUri(), parameters); } + /** + * Initialize and launch the audit manager and service. + * As this takes the FS IOStatistics store, it must be invoked + * after instrumentation is initialized. + * @throws IOException failure to instantiate/initialize. + */ + protected void initializeAuditService() throws IOException { + auditManager = AuditIntegration.createAndStartAuditManager( + getConf(), + instrumentation.createMetricsUpdatingStore()); + } + + /** + * The audit manager. + * @return the audit manager + */ + @InterfaceAudience.Private + public AuditManagerS3A getAuditManager() { + return auditManager; + } + + /** + * Get the auditor; valid once initialized. + * @return the auditor. + */ + @InterfaceAudience.Private + public OperationAuditor getAuditor() { + return getAuditManager().getAuditor(); + } + + /** + * Get the active audit span. + * @return the span. + */ + @InterfaceAudience.Private + @Override + public AuditSpanS3A getActiveAuditSpan() { + return getAuditManager().getActiveAuditSpan(); + } + + /** + * Get the audit span source; allows for components like the committers + * to have a source of spans without being hard coded to the FS only. + * @return the source of spans -base implementation is this instance. 
+ */ + @InterfaceAudience.Private + public AuditSpanSource getAuditSpanSource() { + return this; + } + + /** + * Start an operation; this informs the audit service of the event + * and then sets it as the active span. + * @param operation operation name. + * @param path1 first path of operation + * @param path2 second path of operation + * @return a span for the audit + * @throws IOException failure + */ + public AuditSpanS3A createSpan(String operation, + @Nullable String path1, + @Nullable String path2) + throws IOException { + + return getAuditManager().createSpan(operation, path1, path2); + } + + /** + * Build the request factory. + * MUST be called after reading encryption secrets from settings/ + * delegation token. + * Protected, in case test/mock classes want to implement their + * own variants. + * @return request factory. + */ + protected RequestFactory createRequestFactory() { + long partCountLimit = longOption(getConf(), + UPLOAD_PART_COUNT_LIMIT, + DEFAULT_UPLOAD_PART_COUNT_LIMIT, + 1); + if (partCountLimit != DEFAULT_UPLOAD_PART_COUNT_LIMIT) { + LOG.warn("Configuration property {} shouldn't be overridden by client", + UPLOAD_PART_COUNT_LIMIT); + } + + return RequestFactoryImpl.builder() + .withBucket(requireNonNull(bucket)) + .withCannedACL(getCannedACL()) + .withEncryptionSecrets(requireNonNull(encryptionSecrets)) + .withMultipartPartCountLimit(partCountLimit) + .withRequestPreparer(getAuditManager()::requestCreated) + .build(); + } + + /** + * Get the request factory which uses this store's audit span. + * @return the request factory. + */ + @VisibleForTesting + public RequestFactory getRequestFactory() { + return requestFactory; + } + /** * Implementation of all operations used by delegation tokens. */ @@ -783,6 +918,9 @@ public DelegationOperations createDelegationOperations() { */ protected void setEncryptionSecrets(final EncryptionSecrets secrets) { this.encryptionSecrets = secrets; + if (requestFactory != null) { + requestFactory.setEncryptionSecrets(secrets); + } } /** @@ -936,11 +1074,13 @@ protected void setAmazonS3Client(AmazonS3 client) { /** * Get the region of a bucket. + * Invoked from StoreContext; consider an entry point. * @return the region in which a bucket is located * @throws AccessDeniedException if the caller lacks permission. * @throws IOException on any failure. */ @Retries.RetryTranslated + @InterfaceAudience.LimitedPrivate("diagnostics") public String getBucketLocation() throws IOException { return getBucketLocation(bucket); } @@ -957,10 +1097,13 @@ public String getBucketLocation() throws IOException { * @throws IOException on any failure. */ @VisibleForTesting + @AuditEntryPoint @Retries.RetryTranslated public String getBucketLocation(String bucketName) throws IOException { - final String region = invoker.retry("getBucketLocation()", bucketName, true, - () -> s3.getBucketLocation(bucketName)); + final String region = trackDurationAndSpan( + STORE_EXISTS_PROBE, bucketName, null, () -> + invoker.retry("getBucketLocation()", bucketName, true, () -> + s3.getBucketLocation(bucketName))); return fixBucketRegion(region); } @@ -1192,14 +1335,16 @@ public FSDataInputStream open(Path f, int bufferSize) * @throws IOException IO failure. */ @Retries.RetryTranslated + @AuditEntryPoint private FSDataInputStream open( final Path file, final Optional options, final Optional providedStatus) throws IOException { - entryPoint(INVOCATION_OPEN); final Path path = qualify(file); + // this span is passed into the stream. 
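+ // the stream callbacks created below close the span when they are closed.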
+ final AuditSpan auditSpan = entryPoint(INVOCATION_OPEN, path); S3AFileStatus fileStatus = extractOrFetchSimpleFileStatus(path, providedStatus); @@ -1217,13 +1362,15 @@ private FSDataInputStream open( fileStatus, policy, changeDetectionPolicy, - readAheadRange2); + readAheadRange2, + auditSpan); } else { readContext = createReadContext( fileStatus, inputPolicy, changeDetectionPolicy, - readAhead); + readAhead, + auditSpan); } LOG.debug("Opening '{}'", readContext); @@ -1231,7 +1378,60 @@ private FSDataInputStream open( new S3AInputStream( readContext, createObjectAttributes(fileStatus), - s3)); + createInputStreamCallbacks(auditSpan))); + } + + /** + * Override point: create the callbacks for S3AInputStream. + * @return an implementation of the InputStreamCallbacks, + */ + private S3AInputStream.InputStreamCallbacks createInputStreamCallbacks( + final AuditSpan auditSpan) { + return new InputStreamCallbacksImpl(auditSpan); + } + + /** + * Operations needed by S3AInputStream to read data. + */ + private final class InputStreamCallbacksImpl implements + S3AInputStream.InputStreamCallbacks { + + /** + * Audit span to activate before each call. + */ + private final AuditSpan auditSpan; + + /** + * Create. + * @param auditSpan Audit span to activate before each call. + */ + private InputStreamCallbacksImpl(final AuditSpan auditSpan) { + this.auditSpan = requireNonNull(auditSpan); + } + + /** + * Closes the audit span. + */ + @Override + public void close() { + auditSpan.close(); + } + + @Override + public GetObjectRequest newGetRequest(final String key) { + // active the audit span used for the operation + try (AuditSpan span = auditSpan.activate()) { + return getRequestFactory().newGetObjectRequest(key); + } + } + + @Override + public S3Object getObject(GetObjectRequest request) { + // active the audit span used for the operation + try (AuditSpan span = auditSpan.activate()) { + return s3.getObject(request); + } + } } /** @@ -1246,7 +1446,8 @@ private S3AReadOpContext createReadContext( final FileStatus fileStatus, final S3AInputPolicy seekPolicy, final ChangeDetectionPolicy changePolicy, - final long readAheadRange) { + final long readAheadRange, + final AuditSpan auditSpan) { return new S3AReadOpContext(fileStatus.getPath(), hasMetadataStore(), invoker, @@ -1256,7 +1457,8 @@ private S3AReadOpContext createReadContext( fileStatus, seekPolicy, changePolicy, - readAheadRange); + readAheadRange, + auditSpan); } /** @@ -1313,12 +1515,43 @@ private S3ObjectAttributes createObjectAttributes( * @see #setPermission(Path, FsPermission) */ @Override + @AuditEntryPoint @SuppressWarnings("IOResourceOpenedButNotSafelyClosed") public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { - entryPoint(INVOCATION_CREATE); final Path path = qualify(f); + // the span will be picked up inside the output stream + return trackDurationAndSpan(INVOCATION_CREATE, path, () -> + innerCreateFile(path, permission, overwrite, bufferSize, replication, + blockSize, progress)); + } + + /** + * Create an FSDataOutputStream at the indicated Path with write-progress + * reporting; in the active span. + * Retry policy: retrying, translated on the getFileStatus() probe. + * No data is uploaded to S3 in this call, so no retry issues related to that. + * @param path the file name to open + * @param permission the permission to set. 
+ * @param overwrite if a file with this name already exists, then if true, + * the file will be overwritten, and if false an error will be thrown. + * @param bufferSize the size of the buffer to be used. + * @param replication required block replication for the file. + * @param blockSize the requested block size. + * @param progress the progress reporter. + * @throws IOException in the event of IO related errors. + * @see #setPermission(Path, FsPermission) + */ + @SuppressWarnings("IOResourceOpenedButNotSafelyClosed") + @Retries.RetryTranslated + private FSDataOutputStream innerCreateFile(Path path, + FsPermission permission, + boolean overwrite, + int bufferSize, + short replication, + long blockSize, + Progressable progress) throws IOException { String key = pathToKey(path); FileStatus status = null; try { @@ -1359,7 +1592,8 @@ public FSDataOutputStream create(Path f, FsPermission permission, .withStatistics(outputStreamStatistics) .withProgress(progress) .withPutTracker(putTracker) - .withWriteOperations(getWriteOperationHelper()) + .withWriteOperations( + createWriteOperationHelper(getActiveAuditSpan())) .withExecutorService( new SemaphoredDelegatingExecutor( boundedThreadPool, @@ -1376,7 +1610,9 @@ public FSDataOutputStream create(Path f, FsPermission permission, } /** - * Get a {@code WriteOperationHelper} instance. + * Create a Write Operation Helper with the current active span. + * All operations made through this helper will activate the + * span before execution. * * This class permits other low-level operations against the store. * It is unstable and @@ -1386,7 +1622,23 @@ public FSDataOutputStream create(Path f, FsPermission permission, */ @InterfaceAudience.Private public WriteOperationHelper getWriteOperationHelper() { - return writeHelper; + return createWriteOperationHelper(getActiveAuditSpan()); + } + + /** + * Create a Write Operation Helper with the given span. + * All operations made through this helper will activate the + * span before execution. + * @param auditSpan audit span + * @return a new helper. + */ + @InterfaceAudience.Private + public WriteOperationHelper createWriteOperationHelper(AuditSpan auditSpan) { + return new WriteOperationHelper(this, + getConf(), + statisticsContext, + getAuditSpanSource(), + auditSpan); } /** @@ -1395,6 +1647,7 @@ public WriteOperationHelper getWriteOperationHelper() { * is not a directory. */ @Override + @AuditEntryPoint public FSDataOutputStream createNonRecursive(Path p, FsPermission permission, EnumSet flags, @@ -1402,29 +1655,31 @@ public FSDataOutputStream createNonRecursive(Path p, short replication, long blockSize, Progressable progress) throws IOException { - entryPoint(INVOCATION_CREATE_NON_RECURSIVE); final Path path = makeQualified(p); - Path parent = path.getParent(); - // expect this to raise an exception if there is no parent dir - if (parent != null && !parent.isRoot()) { - S3AFileStatus status; - try { - // optimize for the directory existing: Call list first - status = innerGetFileStatus(parent, false, - StatusProbeEnum.DIRECTORIES); - } catch (FileNotFoundException e) { - // no dir, fall back to looking for a file - // (failure condition if true) - status = innerGetFileStatus(parent, false, - StatusProbeEnum.HEAD_ONLY); - } - if (!status.isDirectory()) { - throw new FileAlreadyExistsException("Not a directory: " + parent); + // this span is passed into the stream. 
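+ // the parent directory probes and the create call below all run inside it.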
+ try (AuditSpan span = entryPoint(INVOCATION_CREATE_NON_RECURSIVE, path)) { + Path parent = path.getParent(); + // expect this to raise an exception if there is no parent dir + if (parent != null && !parent.isRoot()) { + S3AFileStatus status; + try { + // optimize for the directory existing: Call list first + status = innerGetFileStatus(parent, false, + StatusProbeEnum.DIRECTORIES); + } catch (FileNotFoundException e) { + // no dir, fall back to looking for a file + // (failure condition if true) + status = innerGetFileStatus(parent, false, + StatusProbeEnum.HEAD_ONLY); + } + if (!status.isDirectory()) { + throw new FileAlreadyExistsException("Not a directory: " + parent); + } } + return innerCreateFile(path, permission, + flags.contains(CreateFlag.OVERWRITE), bufferSize, + replication, blockSize, progress); } - return create(path, permission, - flags.contains(CreateFlag.OVERWRITE), bufferSize, - replication, blockSize, progress); } /** @@ -1462,11 +1717,13 @@ public FSDataOutputStream append(Path f, int bufferSize, * @throws IOException on IO failure * @return true if rename is successful */ + @AuditEntryPoint @Retries.RetryTranslated public boolean rename(Path src, Path dst) throws IOException { - try (DurationInfo ignored = new DurationInfo(LOG, false, - "rename(%s, %s", src, dst)) { - long bytesCopied = innerRename(src, dst); + try { + long bytesCopied = trackDurationAndSpan( + INVOCATION_RENAME, src.toString(), dst.toString(), () -> + innerRename(src, dst)); LOG.debug("Copied {} bytes", bytesCopied); return true; } catch (AmazonClientException e) { @@ -1604,7 +1861,6 @@ private long innerRename(Path source, Path dest) Path dst = qualify(dest); LOG.debug("Rename path {} to {}", src, dst); - entryPoint(INVOCATION_RENAME); String srcKey = pathToKey(src); String dstKey = pathToKey(dst); @@ -1618,7 +1874,7 @@ private long innerRename(Path source, Path dest) createStoreContext(), src, srcKey, p.getLeft(), dst, dstKey, p.getRight(), - operationCallbacks, + new OperationCallbacksImpl(), pageSize); return renameOperation.execute(); } @@ -1632,8 +1888,17 @@ private long innerRename(Path source, Path dest) * The callbacks made by the rename and delete operations. * This separation allows the operation to be factored out and * still avoid knowledge of the S3AFilesystem implementation. + * The Audit span active at the time of creation is cached and activated + * before every call. */ - private class OperationCallbacksImpl implements OperationCallbacks { + private final class OperationCallbacksImpl implements OperationCallbacks { + + /** Audit Span at time of creation. 
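+ * Activated before each callback is executed.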
*/ + private final AuditSpan auditSpan; + + private OperationCallbacksImpl() { + auditSpan = getActiveAuditSpan(); + } @Override public S3ObjectAttributes createObjectAttributes(final Path path, @@ -1654,7 +1919,8 @@ public S3ObjectAttributes createObjectAttributes( public S3AReadOpContext createReadContext(final FileStatus fileStatus) { return S3AFileSystem.this.createReadContext(fileStatus, inputPolicy, - changeDetectionPolicy, readAhead); + changeDetectionPolicy, readAhead, + auditSpan); } @Override @@ -1664,6 +1930,7 @@ public void deleteObjectAtPath(final Path path, final boolean isFile, final BulkOperationState operationState) throws IOException { + auditSpan.activate(); once("delete", path.toString(), () -> S3AFileSystem.this.deleteObjectAtPath(path, key, isFile, operationState)); @@ -1676,6 +1943,7 @@ public RemoteIterator listFilesAndDirectoryMarkers( final S3AFileStatus status, final boolean collectTombstones, final boolean includeSelf) throws IOException { + auditSpan.activate(); return innerListFiles( path, true, @@ -1692,6 +1960,7 @@ public CopyResult copyFile(final String srcKey, final String destKey, final S3ObjectAttributes srcAttributes, final S3AReadOpContext readContext) throws IOException { + auditSpan.activate(); return S3AFileSystem.this.copyFile(srcKey, destKey, srcAttributes.getLen(), srcAttributes, readContext); } @@ -1704,6 +1973,7 @@ public DeleteObjectsResult removeKeys( final BulkOperationState operationState, final boolean quiet) throws MultiObjectDeleteException, AmazonClientException, IOException { + auditSpan.activate(); return S3AFileSystem.this.removeKeys(keysToDelete, deleteFakeDir, undeletedObjectsOnFailure, operationState, quiet); } @@ -1711,6 +1981,7 @@ public DeleteObjectsResult removeKeys( @Override public void finishRename(final Path sourceRenamed, final Path destCreated) throws IOException { + auditSpan.activate(); Path destParent = destCreated.getParent(); if (!sourceRenamed.getParent().equals(destParent)) { LOG.debug("source & dest parents are different; fix up dir markers"); @@ -1737,10 +2008,17 @@ public RemoteIterator listObjects( createListObjectsRequest(key, null), ACCEPT_ALL, Listing.ACCEPT_ALL_BUT_S3N, - null)); + null, + auditSpan)); } } + /** + * Callbacks from {@link Listing}. + * Auditing: the listing object is long-lived; the audit span + * for a single listing is passed in from the listing + * method calls and then down to the callbacks. 
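+ * The async list callbacks pass that span to submit() along with the work.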
+ */ protected class ListingOperationCallbacksImpl implements ListingOperationCallbacks { @@ -1748,9 +2026,10 @@ protected class ListingOperationCallbacksImpl implements @Retries.RetryRaw public CompletableFuture listObjectsAsync( S3ListRequest request, - DurationTrackerFactory trackerFactory) + DurationTrackerFactory trackerFactory, + AuditSpan span) throws IOException { - return submit(unboundedThreadPool, () -> + return submit(unboundedThreadPool, span, () -> listObjects(request, pairedTrackerFactory(trackerFactory, getDurationTrackerFactory()))); @@ -1761,9 +2040,10 @@ public CompletableFuture listObjectsAsync( public CompletableFuture continueListObjectsAsync( S3ListRequest request, S3ListResult prevResult, - DurationTrackerFactory trackerFactory) + DurationTrackerFactory trackerFactory, + AuditSpan span) throws IOException { - return submit(unboundedThreadPool, + return submit(unboundedThreadPool, span, () -> continueListObjects(request, prevResult, pairedTrackerFactory(trackerFactory, getDurationTrackerFactory()))); @@ -1778,8 +2058,10 @@ public S3ALocatedFileStatus toLocatedFileStatus( @Override public S3ListRequest createListObjectsRequest( - String key, - String delimiter) { + String key, + String delimiter, + AuditSpan span) { + span.activate(); return S3AFileSystem.this.createListObjectsRequest(key, delimiter); } @@ -1806,15 +2088,22 @@ public boolean allowAuthoritative(final Path p) { /** * Low-level call to get at the object metadata. + * This method is used in some external applications and so + * must be viewed as a public entry point. + * Auditing: An audit entry point. * @param path path to the object. This will be qualified. * @return metadata * @throws IOException IO and object access problems. */ @VisibleForTesting + @AuditEntryPoint + @InterfaceAudience.LimitedPrivate("utilities") @Retries.RetryTranslated + @InterfaceStability.Evolving public ObjectMetadata getObjectMetadata(Path path) throws IOException { - return getObjectMetadata(makeQualified(path), null, invoker, - "getObjectMetadata"); + return trackDurationAndSpan(INVOCATION_GET_FILE_STATUS, path, () -> + getObjectMetadata(makeQualified(path), null, invoker, + "getObjectMetadata")); } /** @@ -1830,7 +2119,6 @@ public ObjectMetadata getObjectMetadata(Path path) throws IOException { private ObjectMetadata getObjectMetadata(Path path, ChangeTracker changeTracker, Invoker changeInvoker, String operation) throws IOException { - checkNotClosed(); String key = pathToKey(path); return once(operation, path.toString(), () -> @@ -1878,12 +2166,76 @@ void setMetadataStore(MetadataStore ms) { /** * Entry point to an operation. * Increments the statistic; verifies the FS is active. - * @param operation The operation to increment - * @throws IOException if the - */ - protected void entryPoint(Statistic operation) throws IOException { + * @param operation The operation being invoked + * @param path first path of operation + * @return a span for the audit + * @throws IOException failure of audit service + */ + protected AuditSpan entryPoint(Statistic operation, + Path path) throws IOException { + return entryPoint(operation, + (path != null ? pathToKey(path): null), + null); + } + + /** + * Entry point to an operation. + * Increments the statistic; verifies the FS is active. 
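+ * The returned span becomes the active span for the calling thread and should be closed by the caller; a minimal sketch of the expected pattern:
+ * <pre>
+ *   try (AuditSpan span = entryPoint(INVOCATION_OPEN, pathToKey(path), null)) {
+ *     // S3 requests issued here are attributed to this span
+ *   }
+ * </pre>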
+ * @param operation The operation being invoked + * @param path1 first path of operation + * @param path2 second path of operation + * @return a span for the audit + * @throws IOException failure of audit service + */ + protected AuditSpan entryPoint(Statistic operation, + @Nullable String path1, + @Nullable String path2) throws IOException { checkNotClosed(); incrementStatistic(operation); + return createSpan(operation.getSymbol(), + path1, path2); + } + + /** + * Given an IOException raising callable/lambda expression, + * execute it and update the relevant statistic within a span + * of the same statistic. + * @param statistic statistic key + * @param path first path for span (nullable) + * @param path2 second path for span + * @param input input callable. + * @param <B> return type. + * @return the result of the operation. + * @throws IOException if raised in the callable + */ + private <B> B trackDurationAndSpan( + Statistic statistic, String path, String path2, + CallableRaisingIOE<B> input) throws IOException { + checkNotClosed(); + try (AuditSpan span = createSpan(statistic.getSymbol(), + path, path2)) { + return trackDuration(getDurationTrackerFactory(), + statistic.getSymbol(), input); + } + } + + /** + * Overloaded version of {@code trackDurationAndSpan()}. + * Takes a single nullable path as the path parameter. + * @param statistic statistic key + * @param path path for span (nullable) + * @param input input callable. + * @param <B> return type. + * @return the result of the operation. + * @throws IOException if raised in the callable + */ + private <B> B trackDurationAndSpan( + Statistic statistic, + @Nullable Path path, + CallableRaisingIOE<B> input) throws IOException { + return trackDurationAndSpan(statistic, + path != null ? pathToKey(path): null, + null, input); + } /** * Request object metadata; increments counters in the process. * Retry policy: retry untranslated. + * This method is used in some external applications and so + * must be viewed as a public entry point. + * Auditing: this call does NOT initiate a new AuditSpan; the expectation + * is that there is already an active span. * @param key key * @return the metadata * @throws IOException if the retry invocation raises one (it shouldn't).
*/ @Retries.RetryRaw @VisibleForTesting + @InterfaceAudience.LimitedPrivate("external utilities") ObjectMetadata getObjectMetadata(String key) throws IOException { return getObjectMetadata(key, null, invoker, "getObjectMetadata"); } @@ -2053,12 +2410,10 @@ protected ObjectMetadata getObjectMetadata(String key, ChangeTracker changeTracker, Invoker changeInvoker, String operation) throws IOException { - GetObjectMetadataRequest request = - new GetObjectMetadataRequest(bucket, key); - //SSE-C requires to be filled in if enabled for object metadata - generateSSECustomerKey().ifPresent(request::setSSECustomerKey); ObjectMetadata meta = changeInvoker.retryUntranslated("GET " + key, true, () -> { + GetObjectMetadataRequest request + = getRequestFactory().newGetObjectMetadataRequest(key); incrementStatistic(OBJECT_METADATA_REQUESTS); DurationTracker duration = getDurationTrackerFactory() .trackDuration(ACTION_HTTP_HEAD_REQUEST.getSymbol()); @@ -2161,7 +2516,10 @@ protected S3ListResult continueListObjects(S3ListRequest request, () -> { if (useListV1) { return S3ListResult.v1( - s3.listNextBatchOfObjects(prevResult.getV1())); + s3.listNextBatchOfObjects( + getRequestFactory() + .newListNextBatchOfObjectsRequest( + prevResult.getV1()))); } else { request.getV2().setContinuationToken(prevResult.getV2() .getNextContinuationToken()); @@ -2215,7 +2573,8 @@ protected void deleteObject(String key) incrementStatistic(OBJECT_DELETE_OBJECTS); trackDurationOfInvocation(getDurationTrackerFactory(), OBJECT_DELETE_REQUEST.getSymbol(), - () -> s3.deleteObject(bucket, key)); + () -> s3.deleteObject(getRequestFactory() + .newDeleteObjectRequest(key))); return null; }); } @@ -2328,46 +2687,7 @@ private DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteRequest) */ public PutObjectRequest newPutObjectRequest(String key, ObjectMetadata metadata, File srcfile) { - Preconditions.checkNotNull(srcfile); - PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, - srcfile); - setOptionalPutRequestParameters(putObjectRequest); - putObjectRequest.setCannedAcl(cannedACL); - putObjectRequest.setMetadata(metadata); - return putObjectRequest; - } - - /** - * Create a {@link PutObjectRequest} request. - * The metadata is assumed to have been configured with the size of the - * operation. - * @param key key of object - * @param metadata metadata header - * @param inputStream source data. - * @return the request - */ - PutObjectRequest newPutObjectRequest(String key, - ObjectMetadata metadata, - InputStream inputStream) { - Preconditions.checkNotNull(inputStream); - Preconditions.checkArgument(isNotEmpty(key), "Null/empty key"); - PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, - inputStream, metadata); - setOptionalPutRequestParameters(putObjectRequest); - putObjectRequest.setCannedAcl(cannedACL); - return putObjectRequest; - } - - /** - * Create a new object metadata instance. - * Any standard metadata headers are added here, for example: - * encryption. 
- * @return a new metadata instance - */ - public ObjectMetadata newObjectMetadata() { - final ObjectMetadata om = new ObjectMetadata(); - setOptionalObjectMetadata(om); - return om; + return requestFactory.newPutObjectRequest(key, metadata, srcfile); } /** @@ -2379,11 +2699,7 @@ public ObjectMetadata newObjectMetadata() { * @return a new metadata instance */ public ObjectMetadata newObjectMetadata(long length) { - final ObjectMetadata om = newObjectMetadata(); - if (length >= 0) { - om.setContentLength(length); - } - return om; + return requestFactory.newObjectMetadata(length); } /** @@ -2398,6 +2714,7 @@ public ObjectMetadata newObjectMetadata(long length) { * must reference data (files, buffers) which stay valid until the upload * completes. * Retry policy: N/A: the transfer manager is performing the upload. + * Auditing: must be inside an audit span. * @param putObjectRequest the request * @return the upload initiated */ @@ -2417,6 +2734,7 @@ public UploadInfo putObject(PutObjectRequest putObjectRequest) { * file, from the content length of the header. * * Retry Policy: none. + * Auditing: must be inside an audit span. * Important: this call will close any input stream in the request. * @param putObjectRequest the request * @return the upload initiated @@ -2433,13 +2751,16 @@ PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest) LOG.debug("PUT {} bytes to {}", len, putObjectRequest.getKey()); incrementPutStartStatistics(len); try { - PutObjectResult result = s3.putObject(putObjectRequest); + PutObjectResult result = trackDurationOfSupplier( + getDurationTrackerFactory(), + OBJECT_PUT_REQUESTS.getSymbol(), () -> + s3.putObject(putObjectRequest)); incrementPutCompletedStatistics(true, len); // update metadata finishedWrite(putObjectRequest.getKey(), len, result.getETag(), result.getVersionId(), null); return result; - } catch (AmazonClientException e) { + } catch (SdkBaseException e) { incrementPutCompletedStatistics(false, len); throw e; } @@ -2478,7 +2799,6 @@ UploadPartResult uploadPart(UploadPartRequest request) long len = request.getPartSize(); incrementPutStartStatistics(len); try { - setOptionalUploadPartRequestParameters(request); UploadPartResult uploadPartResult = s3.uploadPart(request); incrementPutCompletedStatistics(true, len); return uploadPartResult; @@ -2497,7 +2817,6 @@ UploadPartResult uploadPart(UploadPartRequest request) public void incrementPutStartStatistics(long bytes) { LOG.debug("PUT start {} bytes", bytes); incrementWriteOperations(); - incrementStatistic(OBJECT_PUT_REQUESTS); incrementGauge(OBJECT_PUT_REQUESTS_ACTIVE, 1); if (bytes > 0) { incrementGauge(OBJECT_PUT_BYTES_PENDING, bytes); @@ -2513,7 +2832,6 @@ public void incrementPutStartStatistics(long bytes) { */ public void incrementPutCompletedStatistics(boolean success, long bytes) { LOG.debug("PUT completed success={}; {} bytes", success, bytes); - incrementWriteOperations(); if (bytes > 0) { incrementStatistic(OBJECT_PUT_BYTES, bytes); decrementGauge(OBJECT_PUT_BYTES_PENDING, bytes); @@ -2582,9 +2900,7 @@ private DeleteObjectsResult removeKeysS3( try { if (enableMultiObjectsDelete) { result = deleteObjects( - new DeleteObjectsRequest(bucket) - .withKeys(keysToDelete) - .withQuiet(quiet)); + getRequestFactory().newBulkDeleteRequest(keysToDelete, quiet)); } else { for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) { deleteObject(keyVersion.getKey()); @@ -2672,7 +2988,7 @@ public void removeKeys( * @throws IOException other IO Exception. 
*/ @Retries.RetryMixed - DeleteObjectsResult removeKeys( + private DeleteObjectsResult removeKeys( final List keysToDelete, final boolean deleteFakeDir, final List undeletedObjectsOnFailure, @@ -2720,33 +3036,39 @@ DeleteObjectsResult removeKeys( * have surfaced. * @throws IOException due to inability to delete a directory or file. */ + @Override @Retries.RetryTranslated + @AuditEntryPoint public boolean delete(Path f, boolean recursive) throws IOException { - try { - entryPoint(INVOCATION_DELETE); - DeleteOperation deleteOperation = new DeleteOperation( - createStoreContext(), - innerGetFileStatus(f, true, StatusProbeEnum.ALL), - recursive, - operationCallbacks, - pageSize); - boolean outcome = deleteOperation.execute(); + checkNotClosed(); + final Path path = qualify(f); + // span covers delete, getFileStatus, fake directory operations. + try (AuditSpan span = createSpan(INVOCATION_DELETE.getSymbol(), + path.toString(), null)) { + boolean outcome = trackDuration(getDurationTrackerFactory(), + INVOCATION_DELETE.getSymbol(), + new DeleteOperation( + createStoreContext(), + innerGetFileStatus(path, true, StatusProbeEnum.ALL), + recursive, + new OperationCallbacksImpl(), + pageSize)); if (outcome) { try { - maybeCreateFakeParentDirectory(f); + maybeCreateFakeParentDirectory(path); } catch (AccessDeniedException e) { LOG.warn("Cannot create directory marker at {}: {}", f.getParent(), e.toString()); - LOG.debug("Failed to create fake dir above {}", f, e); + LOG.debug("Failed to create fake dir above {}", path, e); } } return outcome; } catch (FileNotFoundException e) { - LOG.debug("Couldn't delete {} - does not exist: {}", f, e.toString()); + LOG.debug("Couldn't delete {} - does not exist: {}", path, e.toString()); instrumentation.errorIgnored(); return false; } catch (AmazonClientException e) { - throw translateException("delete", f, e); + throw translateException("delete", path, e); } } @@ -2793,11 +3115,14 @@ protected void maybeCreateFakeParentDirectory(Path path) * */ @Override + @AuditEntryPoint public RemoteIterator listStatusIterator(Path p) throws FileNotFoundException, IOException { - RemoteIterator listStatusItr = once("listStatus", - p.toString(), () -> innerListStatus(p)); - return typeCastingRemoteIterator(listStatusItr); + Path path = qualify(p); + return typeCastingRemoteIterator(trackDurationAndSpan( + INVOCATION_LIST_STATUS, path, () -> + once("listStatus", path.toString(), () -> + innerListStatus(p)))); } /** @@ -2809,18 +3134,25 @@ public RemoteIterator listStatusIterator(Path p) * @throws FileNotFoundException when the path does not exist; * IOException see specific implementation */ + @Override + @AuditEntryPoint public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException { - return once("listStatus", - f.toString(), - () -> iteratorToStatuses(innerListStatus(f), new HashSet<>())); + Path path = qualify(f); + return trackDurationAndSpan(INVOCATION_LIST_STATUS, path, () -> + once("listStatus", path.toString(), + () -> iteratorToStatuses(innerListStatus(path), + new HashSet<>()))); } /** * List the statuses of the files/directories in the given path if the path is - * a directory. + * a directory. The returned iterator is within the current active span. * - * @param f given path + * Auditing: This method MUST be called within a span. + * The span is attached to the iterator. All further S3 calls + * made by the iterator will be within the span. 
+ * @param f qualified path * @return the statuses of the files/directories in the given patch * @throws FileNotFoundException when the path does not exist; * @throws IOException due to an IO problem. @@ -2831,11 +3163,10 @@ private RemoteIterator innerListStatus(Path f) IOException, AmazonClientException { Path path = qualify(f); LOG.debug("List status for path: {}", path); - entryPoint(INVOCATION_LIST_STATUS); Triple, DirListingMetadata, Boolean> statusesAssumingNonEmptyDir = listing - .getFileStatusesAssumingNonEmptyDir(path); + .getFileStatusesAssumingNonEmptyDir(path, getActiveAuditSpan()); if (!statusesAssumingNonEmptyDir.getLeft().hasNext() && statusesAssumingNonEmptyDir.getRight()) { @@ -2897,34 +3228,28 @@ public boolean allowAuthoritative(final Path path) { @VisibleForTesting public S3ListRequest createListObjectsRequest(String key, String delimiter) { - return createListObjectsRequest(key, delimiter, null); + return createListObjectsRequest(key, delimiter, maxKeys); } + /** + * Create the List objects request appropriate for the + * active list request option. + * @param key key for request + * @param delimiter any delimiter + * @param limit limit of keys + * @return the request + */ private S3ListRequest createListObjectsRequest(String key, - String delimiter, Integer overrideMaxKeys) { + String delimiter, int limit) { if (!useListV1) { ListObjectsV2Request request = - new ListObjectsV2Request().withBucketName(bucket) - .withMaxKeys(maxKeys) - .withPrefix(key); - if (delimiter != null) { - request.setDelimiter(delimiter); - } - if (overrideMaxKeys != null) { - request.setMaxKeys(overrideMaxKeys); - } + getRequestFactory().newListObjectsV2Request( + key, delimiter, limit); return S3ListRequest.v2(request); } else { - ListObjectsRequest request = new ListObjectsRequest(); - request.setBucketName(bucket); - request.setMaxKeys(maxKeys); - request.setPrefix(key); - if (delimiter != null) { - request.setDelimiter(delimiter); - } - if (overrideMaxKeys != null) { - request.setMaxKeys(overrideMaxKeys); - } + ListObjectsRequest request = + getRequestFactory().newListObjectsV1Request( + key, delimiter, limit); return S3ListRequest.v1(request); } } @@ -2968,80 +3293,130 @@ public UserGroupInformation getOwner() { * Make the given path and all non-existent parents into * directories. Has the semantics of Unix {@code 'mkdir -p'}. * Existence of the directory hierarchy is not an error. - * @param path path to create - * @param permission to apply to f + * @param p path to create + * @param permission to apply to path * @return true if a directory was created or already existed * @throws FileAlreadyExistsException there is a file at the path specified + * or is discovered on one of its ancestors. * @throws IOException other IO problems */ - // TODO: If we have created an empty file at /foo/bar and we then call - // mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/? - public boolean mkdirs(Path path, FsPermission permission) throws IOException, + @Override + @AuditEntryPoint + public boolean mkdirs(Path p, FsPermission permission) throws IOException, FileAlreadyExistsException { - try { - entryPoint(INVOCATION_MKDIRS); - return innerMkdirs(path, permission); - } catch (AmazonClientException e) { - throw translateException("mkdirs", path, e); + Path path = qualify(p); + return trackDurationAndSpan( + INVOCATION_MKDIRS, path, + new MkdirOperation( + createStoreContext(), + path, + createMkdirOperationCallbacks())); + } + + /** + * Override point: create the callbacks for Mkdir. 
+ * This does not create a new span; caller must be in one. + * @return an implementation of the MkdirCallbacks, + */ + @VisibleForTesting + public MkdirOperation.MkdirCallbacks createMkdirOperationCallbacks() { + return new MkdirOperationCallbacksImpl(); + } + + /** + * Callbacks from the {@link MkdirOperation}. + */ + protected class MkdirOperationCallbacksImpl implements + MkdirOperation.MkdirCallbacks { + + @Override + public S3AFileStatus probePathStatus(final Path path, + final Set probes) throws IOException { + return S3AFileSystem.this.innerGetFileStatus(path, false, probes); + } + + @Override + public void createFakeDirectory(final String key) + throws IOException { + S3AFileSystem.this.createEmptyObject(key); } } /** - * - * Make the given path and all non-existent parents into - * directories. - * See {@link #mkdirs(Path, FsPermission)} - * @param p path to create - * @param permission to apply to f - * @return true if a directory was created or already existed - * @throws FileAlreadyExistsException there is a file at the path specified - * @throws IOException other IO problems - * @throws AmazonClientException on failures inside the AWS SDK + * This is a very slow operation against object storage. + * Execute it as a single span with whatever optimizations + * have been implemented. + * {@inheritDoc} */ - private boolean innerMkdirs(Path p, FsPermission permission) - throws IOException, FileAlreadyExistsException, AmazonClientException { - Path f = qualify(p); - LOG.debug("Making directory: {}", f); - if (p.isRoot()) { - // fast exit for root. - return true; + @Override + @Retries.RetryTranslated + @AuditEntryPoint + public ContentSummary getContentSummary(final Path f) throws IOException { + final Path path = qualify(f); + return trackDurationAndSpan( + INVOCATION_GET_CONTENT_SUMMARY, path, + new GetContentSummaryOperation( + createStoreContext(), + path, + createGetContentSummaryCallbacks())); + } + + /** + * Override point: create the callbacks for getContentSummary. + * This does not create a new span; caller must be in one. + * @return an implementation of the GetContentSummaryCallbacksImpl + */ + protected GetContentSummaryOperation.GetContentSummaryCallbacks + createGetContentSummaryCallbacks() { + return new GetContentSummaryCallbacksImpl(); + } + + /** + * Callbacks from the {@link GetContentSummaryOperation}. 
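+ * These do not create a new span; they run inside the one created by getContentSummary().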
+ */ + protected class GetContentSummaryCallbacksImpl implements + GetContentSummaryOperation.GetContentSummaryCallbacks { + + @Override + public S3AFileStatus probePathStatus(final Path path, + final Set probes) throws IOException { + return S3AFileSystem.this.innerGetFileStatus(path, false, probes); } - FileStatus fileStatus; - try { - fileStatus = innerGetFileStatus(f, false, - StatusProbeEnum.ALL); + @Override + public RemoteIterator listStatusIterator(final Path path) + throws IOException { + return S3AFileSystem.this.innerListStatus(path); + } + } - if (fileStatus.isDirectory()) { - return true; - } else { - throw new FileAlreadyExistsException("Path is a file: " + f); - } - } catch (FileNotFoundException e) { - // Walk path to root, ensuring closest ancestor is a directory, not file - Path fPart = f.getParent(); - while (fPart != null && !fPart.isRoot()) { - try { - fileStatus = getFileStatus(fPart); - if (fileStatus.isDirectory()) { - break; - } - if (fileStatus.isFile()) { - throw new FileAlreadyExistsException(String.format( - "Can't make directory for path '%s' since it is a file.", - fPart)); + /** + * Soft check of access by forwarding to the audit manager + * and so on to the auditor. + * {@inheritDoc} + */ + @Override + @AuditEntryPoint + public void access(final Path f, final FsAction mode) + throws AccessControlException, FileNotFoundException, IOException { + Path path = qualify(f); + LOG.debug("check access mode {} for {}", path, mode); + trackDurationAndSpan( + INVOCATION_ACCESS, path, () -> { + final S3AFileStatus stat = innerGetFileStatus(path, false, + StatusProbeEnum.ALL); + if (!getAuditManager().checkAccess(path, stat, mode)) { + incrementStatistic(AUDIT_ACCESS_CHECK_FAILURE); + throw new AccessControlException(String.format( + "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", + getOwner().getUserName(), + stat.getPath(), + stat.getOwner(), stat.getGroup(), + stat.isDirectory() ? "d" : "-", mode)); } - } catch (FileNotFoundException fnfe) { - instrumentation.errorIgnored(); - } - fPart = fPart.getParent(); - } - String key = pathToKey(f); - // this will create the marker file, delete the parent entries - // and update S3Guard - createFakeDirectory(key); - return true; - } + // simply for the API binding. + return true; + }); } /** @@ -3051,10 +3426,14 @@ private boolean innerMkdirs(Path p, FsPermission permission) * @throws FileNotFoundException when the path does not exist * @throws IOException on other problems. 
*/ + @Override + @AuditEntryPoint @Retries.RetryTranslated public FileStatus getFileStatus(final Path f) throws IOException { - entryPoint(INVOCATION_GET_FILE_STATUS); - return innerGetFileStatus(f, false, StatusProbeEnum.ALL); + Path path = qualify(f); + return trackDurationAndSpan( + INVOCATION_GET_FILE_STATUS, path, () -> + innerGetFileStatus(path, false, StatusProbeEnum.ALL)); } /** @@ -3399,12 +3778,16 @@ private boolean s3Exists(final Path path, final Set probes) * @throws AmazonClientException failure in the AWS SDK */ @Override + @AuditEntryPoint public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { - entryPoint(INVOCATION_COPY_FROM_LOCAL_FILE); + checkNotClosed(); LOG.debug("Copying local file from {} to {}", src, dst); -// innerCopyFromLocalFile(delSrc, overwrite, src, dst); - super.copyFromLocalFile(delSrc, overwrite, src, dst); + trackDurationAndSpan(INVOCATION_COPY_FROM_LOCAL_FILE, dst, () -> { + // innerCopyFromLocalFile(delSrc, overwrite, src, dst); + super.copyFromLocalFile(delSrc, overwrite, src, dst); + return null; + }); } /** @@ -3433,7 +3816,6 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException, FileAlreadyExistsException, AmazonClientException { - entryPoint(INVOCATION_COPY_FROM_LOCAL_FILE); LOG.debug("Copying local file from {} to {}", src, dst); // Since we have a local file, we don't need to stream into a temporary file @@ -3447,7 +3829,7 @@ private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite, } try { - FileStatus status = getFileStatus(dst); + FileStatus status = innerGetFileStatus(dst, false, StatusProbeEnum.ALL); if (!status.isFile()) { throw new FileAlreadyExistsException(dst + " exists and is not a file"); } @@ -3552,6 +3934,12 @@ public void close() throws IOException { } finally { stopAllServices(); } + // Log IOStatistics at debug. + if (LOG.isDebugEnabled()) { + // robust extract and convert to string + LOG.debug("Statistics for {}: {}", uri, + IOStatisticsLogging.ioStatisticsToPrettyString(getIOStatistics())); + } } /** @@ -3561,6 +3949,8 @@ public void close() throws IOException { * both the expected state of this FS and of failures while being stopped. */ protected synchronized void stopAllServices() { + // shutting down the transfer manager also shuts + // down the S3 client it is bonded to. if (transfers != null) { try { transfers.shutdownNow(true); @@ -3570,17 +3960,21 @@ protected synchronized void stopAllServices() { } transfers = null; } + // At this point the S3A client is shut down, + // now the executor pools are closed HadoopExecutors.shutdown(boundedThreadPool, LOG, THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS); boundedThreadPool = null; HadoopExecutors.shutdown(unboundedThreadPool, LOG, THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS); unboundedThreadPool = null; + // other services are shutdown. 
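+ // the audit manager is included in this cleanup list so it is stopped with the other services.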
cleanupWithLogger(LOG, metadataStore, instrumentation, delegationTokens.orElse(null), - signerManager); + signerManager, + auditManager); closeAutocloseables(LOG, credentials); delegationTokens = Optional.empty(); signerManager = null; @@ -3638,13 +4032,17 @@ public String getCanonicalServiceName() { * @throws IOException IO failure */ @Override + @AuditEntryPoint public Token getDelegationToken(String renewer) throws IOException { - entryPoint(Statistic.INVOCATION_GET_DELEGATION_TOKEN); + checkNotClosed(); LOG.debug("Delegation token requested"); if (delegationTokens.isPresent()) { - return delegationTokens.get().getBoundOrNewDT(encryptionSecrets, - (renewer != null ? new Text(renewer) : new Text())); + return trackDurationAndSpan( + INVOCATION_GET_DELEGATION_TOKEN, null, () -> + delegationTokens.get().getBoundOrNewDT( + encryptionSecrets, + (renewer != null ? new Text(renewer) : new Text()))); } else { // Delegation token support is not set up LOG.debug("Token support is not enabled"); @@ -3662,6 +4060,7 @@ public Token getDelegationToken(String renewer) @Override public DelegationTokenIssuer[] getAdditionalTokenIssuers() throws IOException { + checkNotClosed(); if (delegationTokens.isPresent()) { return delegationTokens.get().getAdditionalTokenIssuers(); } else { @@ -3680,6 +4079,7 @@ public DelegationTokenIssuer[] getAdditionalTokenIssuers() * @return a policy for use in roles */ @Override + @InterfaceAudience.Private public List listAWSPolicyRules( final Set access) { if (access.isEmpty()) { @@ -3768,24 +4168,17 @@ private CopyResult copyFile(String srcKey, String dstKey, long size, message, e); } - ObjectMetadata dstom = cloneObjectMetadata(srcom); - setOptionalObjectMetadata(dstom); return readInvoker.retry( action, srcKey, true, () -> { CopyObjectRequest copyObjectRequest = - new CopyObjectRequest(bucket, srcKey, bucket, dstKey); + getRequestFactory().newCopyObjectRequest(srcKey, dstKey, srcom); changeTracker.maybeApplyConstraint(copyObjectRequest); - - setOptionalCopyObjectRequestParameters(srcom, copyObjectRequest); - copyObjectRequest.setCannedAccessControlList(cannedACL); - copyObjectRequest.setNewObjectMetadata(dstom); - Optional.ofNullable(srcom.getStorageClass()) - .ifPresent(copyObjectRequest::setStorageClass); incrementStatistic(OBJECT_COPY_REQUESTS); - Copy copy = transfers.copy(copyObjectRequest); + Copy copy = transfers.copy(copyObjectRequest, + getAuditManager().createStateChangeListener()); copy.addProgressListener(progressListener); CopyOutcome copyOutcome = CopyOutcome.waitForCopy(copy); InterruptedException interruptedException = @@ -3810,62 +4203,6 @@ private CopyResult copyFile(String srcKey, String dstKey, long size, }); } - /** - * Propagate encryption parameters from source file if set else use the - * current filesystem encryption settings. - * @param srcom source object meta. - * @param copyObjectRequest copy object request body. - */ - private void setOptionalCopyObjectRequestParameters( - ObjectMetadata srcom, - CopyObjectRequest copyObjectRequest) { - String sourceKMSId = srcom.getSSEAwsKmsKeyId(); - if (isNotEmpty(sourceKMSId)) { - // source KMS ID is propagated - LOG.debug("Propagating SSE-KMS settings from source {}", - sourceKMSId); - copyObjectRequest.setSSEAwsKeyManagementParams( - new SSEAwsKeyManagementParams(sourceKMSId)); - } - switch(getServerSideEncryptionAlgorithm()) { - /** - * Overriding with client encryption settings. 
- */ - case SSE_C: - generateSSECustomerKey().ifPresent(customerKey -> { - copyObjectRequest.setSourceSSECustomerKey(customerKey); - copyObjectRequest.setDestinationSSECustomerKey(customerKey); - }); - break; - case SSE_KMS: - generateSSEAwsKeyParams().ifPresent( - copyObjectRequest::setSSEAwsKeyManagementParams); - break; - default: - } - } - - /** - * Set the optional parameters when initiating the request (encryption, - * headers, storage, etc). - * @param request request to patch. - */ - protected void setOptionalMultipartUploadRequestParameters( - InitiateMultipartUploadRequest request) { - generateSSEAwsKeyParams().ifPresent(request::setSSEAwsKeyManagementParams); - generateSSECustomerKey().ifPresent(request::setSSECustomerKey); - } - - /** - * Sets server side encryption parameters to the part upload - * request when encryption is enabled. - * @param request upload part request - */ - protected void setOptionalUploadPartRequestParameters( - UploadPartRequest request) { - generateSSECustomerKey().ifPresent(request::setSSECustomerKey); - } - /** * Initiate a multipart upload from the preconfigured request. * Retry policy: none + untranslated. @@ -3878,42 +4215,9 @@ protected void setOptionalUploadPartRequestParameters( InitiateMultipartUploadResult initiateMultipartUpload( InitiateMultipartUploadRequest request) throws IOException { LOG.debug("Initiate multipart upload to {}", request.getKey()); - incrementStatistic(OBJECT_MULTIPART_UPLOAD_INITIATED); - return getAmazonS3Client().initiateMultipartUpload(request); - } - - private void setOptionalPutRequestParameters(PutObjectRequest request) { - generateSSEAwsKeyParams().ifPresent(request::setSSEAwsKeyManagementParams); - generateSSECustomerKey().ifPresent(request::setSSECustomerKey); - } - - private void setOptionalObjectMetadata(ObjectMetadata metadata) { - final S3AEncryptionMethods algorithm - = getServerSideEncryptionAlgorithm(); - if (S3AEncryptionMethods.SSE_S3.equals(algorithm)) { - metadata.setSSEAlgorithm(algorithm.getMethod()); - } - } - - /** - * Create the AWS SDK structure used to configure SSE, - * if the encryption secrets contain the information/settings for this. - * @return an optional set of KMS Key settings - */ - private Optional generateSSEAwsKeyParams() { - return EncryptionSecretOperations.createSSEAwsKeyManagementParams( - encryptionSecrets); - } - - /** - * Create the SSE-C structure for the AWS SDK, if the encryption secrets - * contain the information/settings for this. - * This will contain a secret extracted from the bucket/configuration. - * @return an optional customer key. 
- */ - private Optional generateSSECustomerKey() { - return EncryptionSecretOperations.createSSECustomerKey( - encryptionSecrets); + return trackDurationOfSupplier(getDurationTrackerFactory(), + OBJECT_MULTIPART_UPLOAD_INITIATED.getSymbol(), + () -> getAmazonS3Client().initiateMultipartUpload(request)); } /** @@ -3958,7 +4262,7 @@ void finishedWrite(String key, long length, String eTag, String versionId, CompletableFuture deletion; if (!keepDirectoryMarkers(p)) { deletion = submit( - unboundedThreadPool, + unboundedThreadPool, getActiveAuditSpan(), () -> { deleteUnnecessaryFakeDirectories( p.getParent(), @@ -4077,11 +4381,7 @@ private void deleteUnnecessaryFakeDirectories(Path path, @Retries.RetryTranslated private void createFakeDirectory(final String objectName) throws IOException { - if (!objectName.endsWith("/")) { - createEmptyObject(objectName + "/"); - } else { - createEmptyObject(objectName); - } + createEmptyObject(objectName); } /** @@ -4093,36 +4393,14 @@ private void createFakeDirectory(final String objectName) @Retries.RetryTranslated private void createEmptyObject(final String objectName) throws IOException { - final InputStream im = new InputStream() { - @Override - public int read() throws IOException { - return -1; - } - }; - - PutObjectRequest putObjectRequest = newPutObjectRequest(objectName, - newObjectMetadata(0L), - im); invoker.retry("PUT 0-byte object ", objectName, - true, - () -> putObjectDirect(putObjectRequest)); + true, () -> + putObjectDirect(getRequestFactory() + .newDirectoryMarkerRequest(objectName))); incrementPutProgressStatistics(objectName, 0); instrumentation.directoryCreated(); } - /** - * Creates a copy of the passed {@link ObjectMetadata}. - * Does so without using the {@link ObjectMetadata#clone()} method, - * to avoid copying unnecessary headers. - * @param source the {@link ObjectMetadata} to copy - * @return a copy of {@link ObjectMetadata} with only relevant attributes - */ - private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) { - ObjectMetadata ret = newObjectMetadata(source.getContentLength()); - getHeaderProcessing().cloneObjectMetadata(source, ret); - return ret; - } - /** * Return the number of bytes that large input files should be optimally * be split into to minimize I/O time. @@ -4167,6 +4445,7 @@ public String toString() { if (blockFactory != null) { sb.append(", blockFactory=").append(blockFactory); } + sb.append(", auditManager=").append(auditManager); sb.append(", metastore=").append(metadataStore); sb.append(", authoritativeStore=").append(allowAuthoritativeMetadataStore); sb.append(", authoritativePath=").append(allowAuthoritativePaths); @@ -4247,20 +4526,26 @@ public FileStatus[] globStatus(Path pathPattern) throws IOException { * Increments the statistic {@link Statistic#INVOCATION_GLOB_STATUS}. * Override superclass so as to disable symlink resolution as symlinks * are not supported by S3A. + * + * Although an AuditEntryPoint, the globber itself will talk do + * the filesystem through the filesystem API, so its operations will + * all appear part of separate operations. 
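From a client's point of view, each entry point above now yields its own span and duration statistic, and nested calls the globber makes back into the filesystem show up as separate operations. A usage fragment with placeholder bucket and path names (imports omitted):

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
  // globStatus() runs in its own span; the status probes the globber
  // issues underneath appear as separate operations.
  FileStatus[] matches = fs.globStatus(new Path("/data/*"));
  boolean present = fs.exists(new Path("/data/part-00000"));
  // S3AFileSystem is an IOStatisticsSource, so the collected durations
  // can be dumped once the work is done.
  System.out.println(IOStatisticsLogging.ioStatisticsToPrettyString(
      ((IOStatisticsSource) fs).getIOStatistics()));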
* {@inheritDoc} */ @Override + @AuditEntryPoint public FileStatus[] globStatus( final Path pathPattern, final PathFilter filter) throws IOException { - entryPoint(INVOCATION_GLOB_STATUS); - return Globber.createGlobber(this) - .withPathPattern(pathPattern) - .withPathFiltern(filter) - .withResolveSymlinks(false) - .build() - .glob(); + return trackDurationAndSpan( + INVOCATION_GLOB_STATUS, pathPattern, () -> + Globber.createGlobber(this) + .withPathPattern(pathPattern) + .withPathFiltern(filter) + .withResolveSymlinks(false) + .build() + .glob()); } /** @@ -4268,9 +4553,17 @@ public FileStatus[] globStatus( * {@inheritDoc} */ @Override + @AuditEntryPoint public boolean exists(Path f) throws IOException { - entryPoint(INVOCATION_EXISTS); - return super.exists(f); + final Path path = qualify(f); + try { + trackDurationAndSpan( + INVOCATION_EXISTS, path, () -> + innerGetFileStatus(path, false, StatusProbeEnum.ALL)); + return true; + } catch (FileNotFoundException e) { + return false; + } } /** @@ -4280,12 +4573,15 @@ public boolean exists(Path f) throws IOException { * {@inheritDoc} */ @Override + @AuditEntryPoint @SuppressWarnings("deprecation") public boolean isDirectory(Path f) throws IOException { - entryPoint(INVOCATION_IS_DIRECTORY); + final Path path = qualify(f); try { - return innerGetFileStatus(f, false, StatusProbeEnum.DIRECTORIES) - .isDirectory(); + return trackDurationAndSpan( + INVOCATION_IS_DIRECTORY, path, () -> + innerGetFileStatus(path, false, StatusProbeEnum.DIRECTORIES) + .isDirectory()); } catch (FileNotFoundException e) { // not found or it is a file. return false; @@ -4299,12 +4595,14 @@ public boolean isDirectory(Path f) throws IOException { * {@inheritDoc} */ @Override + @AuditEntryPoint @SuppressWarnings("deprecation") public boolean isFile(Path f) throws IOException { - entryPoint(INVOCATION_IS_FILE); + final Path path = qualify(f); try { - return innerGetFileStatus(f, false, StatusProbeEnum.HEAD_ONLY) - .isFile(); + return trackDurationAndSpan(INVOCATION_IS_FILE, path, () -> + innerGetFileStatus(path, false, StatusProbeEnum.HEAD_ONLY) + .isFile()); } catch (FileNotFoundException e) { // not found or it is a dir. return false; @@ -4332,18 +4630,21 @@ public boolean isFile(Path f) throws IOException { */ @Override @Retries.RetryTranslated + @AuditEntryPoint public EtagChecksum getFileChecksum(Path f, final long length) throws IOException { Preconditions.checkArgument(length >= 0); - entryPoint(INVOCATION_GET_FILE_CHECKSUM); - + final Path path = qualify(f); if (getConf().getBoolean(ETAG_CHECKSUM_ENABLED, ETAG_CHECKSUM_ENABLED_DEFAULT)) { - Path path = qualify(f); - LOG.debug("getFileChecksum({})", path); - ObjectMetadata headers = getObjectMetadata(path); - String eTag = headers.getETag(); - return eTag != null ? new EtagChecksum(eTag) : null; + return trackDurationAndSpan(INVOCATION_GET_FILE_CHECKSUM, path, () -> { + LOG.debug("getFileChecksum({})", path); + ObjectMetadata headers = getObjectMetadata(path, null, + invoker, + "getFileChecksum are"); + String eTag = headers.getETag(); + return eTag != null ? new EtagChecksum(eTag) : null; + }); } else { // disabled return null; @@ -4352,35 +4653,83 @@ public EtagChecksum getFileChecksum(Path f, final long length) /** * Get header processing support. - * @return the header processing of this instance. + * @return a new header processing instance. 
*/ private HeaderProcessing getHeaderProcessing() { - return headerProcessing; + return new HeaderProcessing(createStoreContext(), + createHeaderProcessingCallbacks()); } @Override + @AuditEntryPoint public byte[] getXAttr(final Path path, final String name) throws IOException { - return getHeaderProcessing().getXAttr(path, name); + checkNotClosed(); + try (AuditSpan span = createSpan( + INVOCATION_XATTR_GET_NAMED.getSymbol(), + path.toString(), null)) { + return getHeaderProcessing().getXAttr(path, name); + } } @Override + @AuditEntryPoint public Map getXAttrs(final Path path) throws IOException { - return getHeaderProcessing().getXAttrs(path); + checkNotClosed(); + try (AuditSpan span = createSpan( + INVOCATION_XATTR_GET_MAP.getSymbol(), + path.toString(), null)) { + return getHeaderProcessing().getXAttrs(path); + } } @Override + @AuditEntryPoint public Map getXAttrs(final Path path, final List names) throws IOException { - return getHeaderProcessing().getXAttrs(path, names); + checkNotClosed(); + try (AuditSpan span = createSpan( + INVOCATION_XATTR_GET_NAMED_MAP.getSymbol(), + path.toString(), null)) { + return getHeaderProcessing().getXAttrs(path, names); + } } @Override + @AuditEntryPoint public List listXAttrs(final Path path) throws IOException { - return getHeaderProcessing().listXAttrs(path); + checkNotClosed(); + try (AuditSpan span = createSpan( + INVOCATION_OP_XATTR_LIST.getSymbol(), + path.toString(), null)) { + return getHeaderProcessing().listXAttrs(path); + } + } + + /** + * Create the callbacks. + * @return An implementation of the header processing + * callbacks. + */ + protected HeaderProcessing.HeaderProcessingCallbacks + createHeaderProcessingCallbacks() { + return new HeaderProcessingCallbacksImpl(); } + /** + * Operations needed for Header Processing. + */ + protected final class HeaderProcessingCallbacksImpl implements + HeaderProcessing.HeaderProcessingCallbacks { + + @Override + public ObjectMetadata getObjectMetadata(final String key) + throws IOException { + return once("getObjectMetadata", key, () -> + S3AFileSystem.this.getObjectMetadata(key)); + } + } /** * {@inheritDoc}. * @@ -4408,38 +4757,53 @@ public List listXAttrs(final Path path) throws IOException { */ @Override @Retries.RetryTranslated + @AuditEntryPoint public RemoteIterator listFiles(Path f, boolean recursive) throws FileNotFoundException, IOException { - return toLocatedFileStatusIterator(innerListFiles(f, recursive, - new Listing.AcceptFilesOnly(qualify(f)), null, true, false)); + final Path path = qualify(f); + return toLocatedFileStatusIterator( + trackDurationAndSpan(INVOCATION_LIST_FILES, path, () -> + innerListFiles(path, recursive, + new Listing.AcceptFilesOnly(path), null, true, false))); } /** * Recursive List of files and empty directories. * @param f path to list from + * @param recursive recursive? * @return an iterator. * @throws IOException failure */ + @InterfaceAudience.Private @Retries.RetryTranslated + @AuditEntryPoint public RemoteIterator listFilesAndEmptyDirectories( Path f, boolean recursive) throws IOException { - return innerListFiles(f, recursive, Listing.ACCEPT_ALL_BUT_S3N, - null, true, false); + final Path path = qualify(f); + return trackDurationAndSpan(INVOCATION_LIST_FILES, path, () -> + innerListFiles(path, recursive, + Listing.ACCEPT_ALL_BUT_S3N, + null, true, false)); } /** * Recursive List of files and empty directories, force metadatastore * to act like it is non-authoritative. 
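With the HeaderProcessingCallbacks interface above, header processing reaches the store only through a single getObjectMetadata() call, which keeps the XAttr mapping testable without a live S3 client. A hypothetical stub, not part of this patch:

  // Hypothetical test double for HeaderProcessing.HeaderProcessingCallbacks.
  final class StubHeaderCallbacks
      implements HeaderProcessing.HeaderProcessingCallbacks {
    private final Map<String, ObjectMetadata> objects = new HashMap<>();

    void putObjectMetadata(String key, ObjectMetadata metadata) {
      objects.put(key, metadata);
    }

    @Override
    public ObjectMetadata getObjectMetadata(String key) throws IOException {
      ObjectMetadata metadata = objects.get(key);
      if (metadata == null) {
        // mirror the store's behaviour: an unknown key surfaces as FNFE
        throw new FileNotFoundException("No object under key " + key);
      }
      return metadata;
    }
  }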
* @param f path to list from - * @param recursive + * @param recursive recursive listing? * @return an iterator. * @throws IOException failure */ + @InterfaceAudience.Private @Retries.RetryTranslated + @AuditEntryPoint public RemoteIterator listFilesAndEmptyDirectoriesForceNonAuth( Path f, boolean recursive) throws IOException { - return innerListFiles(f, recursive, Listing.ACCEPT_ALL_BUT_S3N, - null, true, true); + final Path path = qualify(f); + return trackDurationAndSpan(INVOCATION_LIST_FILES, path, () -> + innerListFiles(path, recursive, + Listing.ACCEPT_ALL_BUT_S3N, + null, true, true)); } /** @@ -4488,7 +4852,6 @@ private RemoteIterator innerListFiles( final S3AFileStatus status, final boolean collectTombstones, final boolean forceNonAuthoritativeMS) throws IOException { - entryPoint(INVOCATION_LIST_FILES); Path path = qualify(f); LOG.debug("listFiles({}, {})", path, recursive); try { @@ -4506,7 +4869,8 @@ private RemoteIterator innerListFiles( recursive, acceptor, collectTombstones, - forceNonAuthoritativeMS); + forceNonAuthoritativeMS, + getActiveAuditSpan()); // If there are no list entries present, we // fallback to file existence check as the path // can be a file or empty directory. @@ -4514,7 +4878,7 @@ private RemoteIterator innerListFiles( // If file status was already passed, reuse it. final S3AFileStatus fileStatus = status != null ? status - : (S3AFileStatus) getFileStatus(path); + : innerGetFileStatus(path, false, StatusProbeEnum.ALL); if (fileStatus.isFile()) { return listing.createSingleStatusIterator( toLocatedFileStatus(fileStatus)); @@ -4553,11 +4917,12 @@ public RemoteIterator listLocatedStatus(Path f) */ @Override @Retries.OnceTranslated("s3guard not retrying") + @AuditEntryPoint public RemoteIterator listLocatedStatus(final Path f, final PathFilter filter) throws FileNotFoundException, IOException { - entryPoint(INVOCATION_LIST_LOCATED_STATUS); Path path = qualify(f); + AuditSpan span = entryPoint(INVOCATION_LIST_LOCATED_STATUS, path); LOG.debug("listLocatedStatus({}, {}", path, filter); RemoteIterator iterator = once("listLocatedStatus", path.toString(), @@ -4566,12 +4931,13 @@ public RemoteIterator listLocatedStatus(final Path f, // trigger a list call directly. final RemoteIterator locatedFileStatusIteratorForDir = - listing.getLocatedFileStatusIteratorForDir(path, filter); + listing.getLocatedFileStatusIteratorForDir(path, filter, + span); // If no listing is present then path might be a file. if (!locatedFileStatusIteratorForDir.hasNext()) { final S3AFileStatus fileStatus = - (S3AFileStatus) getFileStatus(path); + innerGetFileStatus(path, false, StatusProbeEnum.ALL); if (fileStatus.isFile()) { // simple case: File LOG.debug("Path is a file"); @@ -4587,43 +4953,6 @@ public RemoteIterator listLocatedStatus(final Path f, return toLocatedFileStatusIterator(iterator); } - /** - * Generate list located status for a directory. - * Also performing tombstone reconciliation for guarded directories. - * @param dir directory to check. - * @param filter a path filter. - * @return an iterator that traverses statuses of the given dir. - * @throws IOException in case of failure. 
- */ - private RemoteIterator getLocatedFileStatusIteratorForDir( - Path dir, PathFilter filter) throws IOException { - final String key = maybeAddTrailingSlash(pathToKey(dir)); - final Listing.FileStatusAcceptor acceptor = - new Listing.AcceptAllButSelfAndS3nDirs(dir); - boolean allowAuthoritative = allowAuthoritative(dir); - DirListingMetadata meta = - S3Guard.listChildrenWithTtl(metadataStore, dir, - ttlTimeProvider, allowAuthoritative); - Set tombstones = meta != null - ? meta.listTombstones() - : null; - final RemoteIterator cachedFileStatusIterator = - listing.createProvidedFileStatusIterator( - dirMetaToStatuses(meta), filter, acceptor); - return (allowAuthoritative && meta != null - && meta.isAuthoritative()) - ? listing.createLocatedFileStatusIterator( - cachedFileStatusIterator) - : listing.createTombstoneReconcilingIterator( - listing.createLocatedFileStatusIterator( - listing.createFileStatusListingIterator(dir, - createListObjectsRequest(key, "/"), - filter, - acceptor, - cachedFileStatusIterator)), - tombstones); - } - /** * Build a {@link S3ALocatedFileStatus} from a {@link FileStatus} instance. * @param status file status @@ -4648,10 +4977,17 @@ S3ALocatedFileStatus toLocatedFileStatus(S3AFileStatus status) * @return Iterator over multipart uploads. * @throws IOException on failure */ + @InterfaceAudience.Private + @Retries.RetryTranslated + @AuditEntryPoint public MultipartUtils.UploadIterator listUploads(@Nullable String prefix) throws IOException { - return MultipartUtils.listMultipartUploads(s3, invoker, bucket, maxKeys, - prefix); + // span is picked up retained in the listing. + return trackDurationAndSpan(MULTIPART_UPLOAD_LIST, prefix, null, () -> + MultipartUtils.listMultipartUploads( + createStoreContext(), + s3, prefix, maxKeys + )); } /** @@ -4667,17 +5003,16 @@ public MultipartUtils.UploadIterator listUploads(@Nullable String prefix) @Retries.RetryTranslated public List listMultipartUploads(String prefix) throws IOException { - ListMultipartUploadsRequest request = new ListMultipartUploadsRequest( - bucket); - if (!prefix.isEmpty()) { - if (!prefix.endsWith("/")) { - prefix = prefix + "/"; - } - request.setPrefix(prefix); + // add a trailing / if needed. 
+ if (prefix != null && !prefix.isEmpty() && !prefix.endsWith("/")) { + prefix = prefix + "/"; } - - return invoker.retry("listMultipartUploads", prefix, true, - () -> s3.listMultipartUploads(request).getMultipartUploads()); + String p = prefix; + return invoker.retry("listMultipartUploads", p, true, () -> { + ListMultipartUploadsRequest request = getRequestFactory() + .newListMultipartUploadsRequest(p); + return s3.listMultipartUploads(request).getMultipartUploads(); + }); } /** @@ -4690,7 +5025,7 @@ public List listMultipartUploads(String prefix) void abortMultipartUpload(String destKey, String uploadId) { LOG.info("Aborting multipart upload {} to {}", uploadId, destKey); getAmazonS3Client().abortMultipartUpload( - new AbortMultipartUploadRequest(getBucket(), + getRequestFactory().newAbortMultipartUploadRequest( destKey, uploadId)); } @@ -4713,7 +5048,7 @@ void abortMultipartUpload(MultipartUpload upload) { df.format(upload.getInitiated())); } getAmazonS3Client().abortMultipartUpload( - new AbortMultipartUploadRequest(getBucket(), + getRequestFactory().newAbortMultipartUploadRequest( destKey, uploadId)); } @@ -4741,7 +5076,7 @@ public boolean hasPathCapability(final Path path, final String capability) case SelectConstants.S3_SELECT_CAPABILITY: // select is only supported if enabled - return selectBinding.isEnabled(); + return SelectBinding.isSelectEnabled(getConf()); case CommonPathCapabilities.FS_CHECKSUMS: // capability depends on FS configuration @@ -4818,8 +5153,6 @@ protected void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) { /** * This is a proof of concept of a select API. - * Once a proper factory mechanism for opening files is added to the - * FileSystem APIs, this will be deleted without any warning. * @param source path to source data * @param expression select expression * @param options request configuration from the builder. @@ -4828,12 +5161,13 @@ protected void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) { * @throws IOException IO failure */ @Retries.RetryTranslated + @AuditEntryPoint private FSDataInputStream select(final Path source, final String expression, final Configuration options, final Optional providedStatus) throws IOException { - entryPoint(OBJECT_SELECT_REQUESTS); + final AuditSpan auditSpan = entryPoint(OBJECT_SELECT_REQUESTS, source); requireSelectSupport(source); final Path path = makeQualified(source); final S3AFileStatus fileStatus = extractOrFetchSimpleFileStatus(path, @@ -4843,7 +5177,7 @@ private FSDataInputStream select(final Path source, long ra = options.getLong(READAHEAD_RANGE, readAhead); S3ObjectAttributes objectAttributes = createObjectAttributes(fileStatus); S3AReadOpContext readContext = createReadContext(fileStatus, inputPolicy, - changeDetectionPolicy, ra); + changeDetectionPolicy, ra, auditSpan); if (changeDetectionPolicy.getSource() != ChangeDetectionPolicy.Source.None && fileStatus.getETag() != null) { @@ -4867,13 +5201,16 @@ private FSDataInputStream select(final Path source, Invoker readInvoker = readContext.getReadInvoker(); getObjectMetadata(path, changeTracker, readInvoker, "select"); } + // instantiate S3 Select support using the current span + // as the active span for operations. 
+ SelectBinding selectBinding = new SelectBinding( + createWriteOperationHelper(auditSpan)); // build and execute the request return selectBinding.select( readContext, expression, options, - generateSSECustomerKey(), objectAttributes); } @@ -4884,7 +5221,8 @@ private FSDataInputStream select(final Path source, */ private void requireSelectSupport(final Path source) throws UnsupportedOperationException { - if (!selectBinding.isEnabled()) { + if (!SelectBinding.isSelectEnabled(getConf())) { + throw new UnsupportedOperationException( SelectConstants.SELECT_UNSUPPORTED); } @@ -4923,7 +5261,9 @@ private S3AFileStatus extractOrFetchSimpleFileStatus( /** * Initiate the open or select operation. - * This is invoked from both the FileSystem and FileContext APIs + * This is invoked from both the FileSystem and FileContext APIs. + * It's declared as an audit entry point but the span creation is pushed + * down into the open/select methods it ultimately calls. * @param rawPath path to the file * @param parameters open file parameters from the builder. * @return a future which will evaluate to the opened/selected file. @@ -4934,6 +5274,7 @@ private S3AFileStatus extractOrFetchSimpleFileStatus( */ @Override @Retries.RetryTranslated + @AuditEntryPoint public CompletableFuture openFileWithOptions( final Path rawPath, final OpenFileParameters parameters) throws IOException { @@ -4999,15 +5340,19 @@ public CompletableFuture openFileWithOptions( } @Override + @AuditEntryPoint public S3AMultipartUploaderBuilder createMultipartUploader( final Path basePath) throws IOException { - StoreContext ctx = createStoreContext(); - return new S3AMultipartUploaderBuilder(this, - getWriteOperationHelper(), - ctx, - basePath, - statisticsContext.createMultipartUploaderStatistics()); + final Path path = makeQualified(basePath); + try (AuditSpan span = entryPoint(MULTIPART_UPLOAD_INSTANTIATED, path)) { + StoreContext ctx = createStoreContext(); + return new S3AMultipartUploaderBuilder(this, + createWriteOperationHelper(span), + ctx, + path, + statisticsContext.createMultipartUploaderStatistics()); + } } /** @@ -5036,16 +5381,24 @@ public StoreContext createStoreContext() { .setUseListV1(useListV1) .setContextAccessors(new ContextAccessorsImpl()) .setTimeProvider(getTtlTimeProvider()) + .setAuditor(getAuditor()) .build(); } /** * Create a marker tools operations binding for this store. + * Auditing: + * @param target target path * @return callbacks for operations. 
+ * @throws IOException if raised during span creation */ + @AuditEntryPoint @InterfaceAudience.Private - public MarkerToolOperations createMarkerToolOperations() { - return new MarkerToolOperationsImpl(operationCallbacks); + public MarkerToolOperations createMarkerToolOperations(final String target) + throws IOException { + createSpan("marker-tool-scan", target, + null); + return new MarkerToolOperationsImpl(new OperationCallbacksImpl()); } /** @@ -5089,10 +5442,13 @@ public Path makeQualified(final Path path) { } @Override - public ObjectMetadata getObjectMetadata(final String key) - throws IOException { - return once("getObjectMetadata", key, () -> - S3AFileSystem.this.getObjectMetadata(key)); + public AuditSpan getActiveAuditSpan() { + return S3AFileSystem.this.getActiveAuditSpan(); + } + + @Override + public RequestFactory getRequestFactory() { + return S3AFileSystem.this.getRequestFactory(); } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index c725fdf37ff8a..d56d4ac433eb7 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -20,14 +20,11 @@ import javax.annotation.Nullable; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.s3.model.SSECustomerKey; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CanSetReadahead; @@ -45,6 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.EOFException; import java.io.IOException; import java.net.SocketTimeoutException; @@ -101,7 +99,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, private S3Object object; private S3ObjectInputStream wrappedStream; private final S3AReadOpContext context; - private final AmazonS3 client; + private final InputStreamCallbacks client; private final String bucket; private final String key; private final String pathStr; @@ -110,8 +108,6 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, private static final Logger LOG = LoggerFactory.getLogger(S3AInputStream.class); private final S3AInputStreamStatistics streamStatistics; - private S3AEncryptionMethods serverSideEncryptionAlgorithm; - private String serverSideEncryptionKey; private S3AInputPolicy inputPolicy; private long readahead = Constants.DEFAULT_READAHEAD_RANGE; @@ -150,7 +146,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, */ public S3AInputStream(S3AReadOpContext ctx, S3ObjectAttributes s3Attributes, - AmazonS3 client) { + InputStreamCallbacks client) { Preconditions.checkArgument(isNotEmpty(s3Attributes.getBucket()), "No Bucket"); Preconditions.checkArgument(isNotEmpty(s3Attributes.getKey()), "No Key"); @@ -166,9 +162,6 @@ public S3AInputStream(S3AReadOpContext ctx, this.streamStatistics = ctx.getS3AStatisticsContext() .newInputStreamStatistics(); 
this.ioStatistics = streamStatistics.getIOStatistics(); - this.serverSideEncryptionAlgorithm = - s3Attributes.getServerSideEncryptionAlgorithm(); - this.serverSideEncryptionKey = s3Attributes.getServerSideEncryptionKey(); this.changeTracker = new ChangeTracker(uri, ctx.getChangeDetectionPolicy(), streamStatistics.getChangeTrackerStatistics(), @@ -211,16 +204,13 @@ private synchronized void reopen(String reason, long targetPos, long length, inputPolicy); long opencount = streamStatistics.streamOpened(); - GetObjectRequest request = new GetObjectRequest(bucket, key) + GetObjectRequest request = client.newGetRequest(key) .withRange(targetPos, contentRangeFinish - 1); - if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm) && - StringUtils.isNotBlank(serverSideEncryptionKey)){ - request.setSSECustomerKey(new SSECustomerKey(serverSideEncryptionKey)); - } String operation = opencount == 0 ? OPERATION_OPEN : OPERATION_REOPEN; String text = String.format("%s %s at %d", operation, uri, targetPos); changeTracker.maybeApplyConstraint(request); + DurationTracker tracker = streamStatistics.initiateGetRequest(); try { object = Invoker.once(text, uri, @@ -567,6 +557,8 @@ public synchronized void close() throws IOException { // close or abort the stream closeStream("close() operation", this.contentRangeFinish, false); LOG.debug("Statistics of stream {}\n{}", key, streamStatistics); + // end the client+audit span. + client.close(); // this is actually a no-op super.close(); } finally { @@ -908,4 +900,27 @@ boolean isObjectStreamOpen() { public IOStatistics getIOStatistics() { return ioStatistics; } + + /** + * Callbacks for input stream IO. + */ + public interface InputStreamCallbacks extends Closeable { + + /** + * Create a GET request. + * @param key object key + * @return the request + */ + GetObjectRequest newGetRequest(String key); + + /** + * Execute the request. 
+ * @param request the request + * @return the response + */ + @Retries.OnceRaw + S3Object getObject(GetObjectRequest request); + + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java index 169a74abef2d7..a185bac2cb4b0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java @@ -28,15 +28,16 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.s3a.s3guard.MetastoreInstrumentation; -import org.apache.hadoop.fs.s3a.statistics.impl.AbstractS3AStatisticsSource; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.fs.s3a.statistics.ChangeTrackerStatistics; import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; import org.apache.hadoop.fs.s3a.statistics.CountersAndGauges; -import org.apache.hadoop.fs.s3a.statistics.impl.CountingChangeTracker; import org.apache.hadoop.fs.s3a.statistics.DelegationTokenStatistics; import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; -import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; +import org.apache.hadoop.fs.s3a.statistics.impl.AbstractS3AStatisticsSource; +import org.apache.hadoop.fs.s3a.statistics.impl.CountingChangeTracker; +import org.apache.hadoop.fs.s3a.statistics.impl.ForwardingIOStatisticsStore; import org.apache.hadoop.fs.statistics.DurationTrackerFactory; import org.apache.hadoop.fs.statistics.IOStatisticsLogging; import org.apache.hadoop.fs.statistics.IOStatisticsSource; @@ -468,6 +469,14 @@ public DurationTracker trackDuration(final String key, final long count) { return durationTrackerFactory.trackDuration(key, count); } + /** + * Create an IOStatistics store which updates FS metrics + * as well as IOStatistics. + */ + public IOStatisticsStore createMetricsUpdatingStore() { + return new MetricsUpdatingIOStatisticsStore(); + } + /** * String representation. Includes the IOStatistics * when logging is at DEBUG. @@ -547,10 +556,24 @@ public void errorIgnored() { * @param count increment value */ public void incrementCounter(Statistic op, long count) { - String name = op.getSymbol(); + incrementNamedCounter(op.getSymbol(), count); + } + + /** + * Increments a mutable counter and the matching + * instance IOStatistics counter. + * No-op if the counter is not defined, or the count == 0. + * @param name counter name + * @param count increment value + * @return the updated value or, if the counter is unknown: 0 + */ + private long incrementNamedCounter(final String name, + final long count) { if (count != 0) { incrementMutableCounter(name, count); - instanceIOStatistics.incrementCounter(name, count); + return instanceIOStatistics.incrementCounter(name, count); + } else { + return 0; } } @@ -1868,4 +1891,43 @@ public Map getMap() { return map; } } + + /** + * An IOStatisticsStore which updates metrics on calls to + * {@link #incrementCounter(String, long)}. + * This helps keeps FS metrics and IOStats in sync. + * Duration tracking methods are forwarded to + * the S3A Instrumentation duration tracker, which will + * update the instance IOStatistics. 
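The store defined in the next hunk extends ForwardingIOStatisticsStore so that a single incrementCounter() call feeds both the Hadoop metrics system and the filesystem's IOStatistics. A small illustration of the intended behaviour, assuming an S3AInstrumentation instance and using a counter name purely as an example:

  // instrumentation is an S3AInstrumentation instance (assumption).
  IOStatisticsStore store = instrumentation.createMetricsUpdatingStore();
  // one call updates the classic FS metric and the IOStatistics counter
  long newValue = store.incrementCounter("ignored_errors", 1);

Keeping the bridge inside a subclass of the forwarding store means callers only ever see the IOStatisticsStore API; the metrics side effect stays an implementation detail of S3AInstrumentation.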
+ */ + private final class MetricsUpdatingIOStatisticsStore + extends ForwardingIOStatisticsStore { + + private MetricsUpdatingIOStatisticsStore() { + super(S3AInstrumentation.this.getIOStatistics()); + } + + /** + * Incrementing the counter also implements the metric alongside + * the IOStatistics value. + * @param key counter key + * @param value increment value. + * @return the value in the wrapped IOStatistics. + */ + @Override + public long incrementCounter(final String key, final long value) { + incrementMutableCounter(key, value); + return super.incrementCounter(key, value); + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return S3AInstrumentation.this.trackDuration(key, count); + } + + @Override + public DurationTracker trackDuration(final String key) { + return S3AInstrumentation.this.trackDuration(key); + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java index 10295484fe815..725a7fe0971b8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java @@ -66,6 +66,7 @@ public int hashCode() { /** * Generate an S3AFileStatus instance, including etag and * version ID, if present. + * @return the S3A status. */ public S3AFileStatus toS3AFileStatus() { return new S3AFileStatus( diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java index 3729341dbfe27..43398fc4a167b 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; +import org.apache.hadoop.fs.store.audit.AuditSpan; import javax.annotation.Nullable; @@ -55,6 +56,8 @@ public class S3AReadOpContext extends S3AOpContext { */ private final long readahead; + private final AuditSpan auditSpan; + /** * Instantiate. * @param path path of read @@ -65,8 +68,9 @@ public class S3AReadOpContext extends S3AOpContext { * @param instrumentation statistics context * @param dstFileStatus target file status * @param inputPolicy the input policy - * @param readahead readahead for GET operations/skip, etc. * @param changeDetectionPolicy change detection policy. + * @param readahead readahead for GET operations/skip, etc. + * @param auditSpan active audit */ public S3AReadOpContext( final Path path, @@ -78,11 +82,13 @@ public S3AReadOpContext( FileStatus dstFileStatus, S3AInputPolicy inputPolicy, ChangeDetectionPolicy changeDetectionPolicy, - final long readahead) { + final long readahead, + final AuditSpan auditSpan) { super(isS3GuardEnabled, invoker, s3guardInvoker, stats, instrumentation, dstFileStatus); this.path = checkNotNull(path); + this.auditSpan = auditSpan; Preconditions.checkArgument(readahead >= 0, "invalid readahead %d", readahead); this.inputPolicy = checkNotNull(inputPolicy); @@ -133,6 +139,14 @@ public long getReadahead() { return readahead; } + /** + * Get the audit which was active when the file was opened. 
+ * @return active span + */ + public AuditSpan getAuditSpan() { + return auditSpan; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder( diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index 314f13f0352e9..b6af8e7f27c20 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -34,7 +34,6 @@ import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.S3ObjectSummary; - import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -42,7 +41,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; @@ -91,6 +89,7 @@ import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; +import static org.apache.hadoop.util.functional.RemoteIterators.filteringRemoteIterator; /** * Utility methods for S3A code. @@ -201,7 +200,8 @@ public static IOException translateException(@Nullable String operation, } if (exception instanceof CredentialInitializationException) { // the exception raised by AWSCredentialProvider list if the - // credentials were not accepted. + // credentials were not accepted, + // or auditing blocked the operation. return (AccessDeniedException)new AccessDeniedException(path, null, exception.toString()).initCause(exception); } @@ -1423,23 +1423,19 @@ private static void initUserAgent(Configuration conf, * an array. Given tombstones are filtered out. If the iterator * does return any item, an empty array is returned. 
* @param iterator a non-null iterator - * @param tombstones + * @param tombstones possibly empty set of tombstones * @return a possibly-empty array of file status entries - * @throws IOException + * @throws IOException failure */ public static S3AFileStatus[] iteratorToStatuses( RemoteIterator iterator, Set tombstones) throws IOException { - List statuses = new ArrayList<>(); - - while (iterator.hasNext()) { - S3AFileStatus status = iterator.next(); - if (!tombstones.contains(status.getPath())) { - statuses.add(status); - } - } - - return statuses.toArray(new S3AFileStatus[0]); + // this will close the span afterwards + RemoteIterator source = filteringRemoteIterator(iterator, + st -> !tombstones.contains(st.getPath())); + S3AFileStatus[] statuses = RemoteIterators + .toArray(source, new S3AFileStatus[0]); + return statuses; } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java index dbb39fb662408..5ef99ed6f5c3c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java @@ -115,6 +115,11 @@ final class S3ClientCreationParameters { */ private String userAgentSuffix = ""; + /** + * List of request handlers to include in the chain + * of request execution in the SDK. + * @return the handler list + */ public List getRequestHandlers() { return requestHandlers; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java index c613c06c9bb4e..7890e2d946b76 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java @@ -22,6 +22,7 @@ import java.util.Map; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.audit.AuditStatisticNames; import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; import org.apache.hadoop.fs.statistics.StoreStatisticNames; import org.apache.hadoop.fs.statistics.StreamStatisticNames; @@ -30,6 +31,7 @@ import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_DURATION; import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_GAUGE; import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_QUANTILE; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_FAILURES; /** * Statistic which are collected in S3A. 
@@ -93,14 +95,18 @@ public enum Statistic { StoreStatisticNames.OP_ABORT, "Calls of abort()", TYPE_DURATION), + INVOCATION_ACCESS( + StoreStatisticNames.OP_ACCESS, + "Calls of access()", + TYPE_DURATION), INVOCATION_COPY_FROM_LOCAL_FILE( StoreStatisticNames.OP_COPY_FROM_LOCAL_FILE, "Calls of copyFromLocalFile()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_CREATE( StoreStatisticNames.OP_CREATE, "Calls of create()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_CREATE_NON_RECURSIVE( StoreStatisticNames.OP_CREATE_NON_RECURSIVE, "Calls of createNonRecursive()", @@ -108,35 +114,39 @@ public enum Statistic { INVOCATION_DELETE( StoreStatisticNames.OP_DELETE, "Calls of delete()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_EXISTS( StoreStatisticNames.OP_EXISTS, "Calls of exists()", - TYPE_COUNTER), + TYPE_DURATION), + INVOCATION_GET_CONTENT_SUMMARY( + StoreStatisticNames.OP_GET_CONTENT_SUMMARY, + "Calls of getContentSummary()", + TYPE_DURATION), INVOCATION_GET_DELEGATION_TOKEN( StoreStatisticNames.OP_GET_DELEGATION_TOKEN, "Calls of getDelegationToken()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_GET_FILE_CHECKSUM( StoreStatisticNames.OP_GET_FILE_CHECKSUM, "Calls of getFileChecksum()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_GET_FILE_STATUS( StoreStatisticNames.OP_GET_FILE_STATUS, "Calls of getFileStatus()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_GLOB_STATUS( StoreStatisticNames.OP_GLOB_STATUS, "Calls of globStatus()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_IS_DIRECTORY( StoreStatisticNames.OP_IS_DIRECTORY, "Calls of isDirectory()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_IS_FILE( StoreStatisticNames.OP_IS_FILE, "Calls of isFile()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_HFLUSH( StoreStatisticNames.OP_HFLUSH, "Calls of hflush()", @@ -148,7 +158,7 @@ public enum Statistic { INVOCATION_LIST_FILES( StoreStatisticNames.OP_LIST_FILES, "Calls of listFiles()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_LIST_LOCATED_STATUS( StoreStatisticNames.OP_LIST_LOCATED_STATUS, "Calls of listLocatedStatus()", @@ -156,11 +166,11 @@ public enum Statistic { INVOCATION_LIST_STATUS( StoreStatisticNames.OP_LIST_STATUS, "Calls of listStatus()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_MKDIRS( StoreStatisticNames.OP_MKDIRS, "Calls of mkdirs()", - TYPE_COUNTER), + TYPE_DURATION), INVOCATION_OPEN( StoreStatisticNames.OP_OPEN, "Calls of open()", @@ -168,7 +178,7 @@ public enum Statistic { INVOCATION_RENAME( StoreStatisticNames.OP_RENAME, "Calls of rename()", - TYPE_COUNTER), + TYPE_DURATION), /* The XAttr API metrics are all durations */ INVOCATION_XATTR_GET_MAP( @@ -215,15 +225,15 @@ public enum Statistic { OBJECT_MULTIPART_UPLOAD_INITIATED( StoreStatisticNames.OBJECT_MULTIPART_UPLOAD_INITIATED, "Object multipart upload initiated", - TYPE_COUNTER), + TYPE_DURATION), OBJECT_MULTIPART_UPLOAD_ABORTED( StoreStatisticNames.OBJECT_MULTIPART_UPLOAD_ABORTED, "Object multipart upload aborted", - TYPE_COUNTER), + TYPE_DURATION), OBJECT_PUT_REQUESTS( StoreStatisticNames.OBJECT_PUT_REQUEST, "Object put/multipart upload count", - TYPE_COUNTER), + TYPE_DURATION), OBJECT_PUT_REQUESTS_COMPLETED( StoreStatisticNames.OBJECT_PUT_REQUEST_COMPLETED, "Object put/multipart upload completed count", @@ -421,7 +431,7 @@ public enum Statistic { "Count of bytes uploaded duing commit operations", TYPE_COUNTER), COMMITTER_COMMITS_FAILED( - "committer_commits"+ StoreStatisticNames.SUFFIX_FAILURES, + "committer_commits"+ SUFFIX_FAILURES, "Count of commits failed", TYPE_COUNTER), COMMITTER_COMMITS_ABORTED( @@ 
-487,6 +497,9 @@ public enum Statistic { /* General Store operations */ + STORE_EXISTS_PROBE(StoreStatisticNames.STORE_EXISTS_PROBE, + "Store Existence Probe", + TYPE_DURATION), STORE_IO_REQUEST(StoreStatisticNames.STORE_IO_REQUEST, "requests made of the remote store", TYPE_COUNTER), @@ -538,9 +551,32 @@ public enum Statistic { StoreStatisticNames.MULTIPART_UPLOAD_COMPLETED, "Multipart Upload Completed", TYPE_COUNTER), + MULTIPART_UPLOAD_LIST( + StoreStatisticNames.MULTIPART_UPLOAD_LIST, + "Multipart Upload List", + TYPE_DURATION), MULTIPART_UPLOAD_STARTED( StoreStatisticNames.MULTIPART_UPLOAD_STARTED, "Multipart Upload Started", + TYPE_COUNTER), + + /* Audit statistics. */ + /* If more are added: update AuditTestSupport to include the new values. */ + AUDIT_ACCESS_CHECK_FAILURE( + AuditStatisticNames.AUDIT_ACCESS_CHECK_FAILURE, + "Audit access check was rejected", + TYPE_COUNTER), + AUDIT_SPAN_CREATION( + AuditStatisticNames.AUDIT_SPAN_CREATION, + "Audit Span Created", + TYPE_COUNTER), + AUDIT_FAILURE( + AuditStatisticNames.AUDIT_FAILURE, + "Audit failure/rejection", + TYPE_COUNTER), + AUDIT_REQUEST_EXECUTION( + AuditStatisticNames.AUDIT_REQUEST_EXECUTION, + "AWS request made", TYPE_COUNTER); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java index 8b71fc3277142..2eb4841d9bd06 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java @@ -23,7 +23,6 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; @@ -51,19 +50,20 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.impl.StoreContext; import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.s3guard.S3Guard; import org.apache.hadoop.fs.s3a.select.SelectBinding; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.functional.CallableRaisingIOE; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Invoker.*; -import static org.apache.hadoop.fs.s3a.S3AUtils.longOption; -import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT; -import static org.apache.hadoop.fs.s3a.impl.InternalConstants.UPLOAD_PART_COUNT_LIMIT; +import static org.apache.hadoop.fs.store.audit.AuditingFunctions.withinAuditSpan; /** * Helper for low-level operations against an S3 Bucket for writing data, @@ -87,6 +87,17 @@ * * * This API is for internal use only. + * Span scoping: This helper is instantiated with span; it will be used + * before operations which query/update S3 + * + * History + *
+ * - A nested class in S3AFileSystem
+ * - Single shared instance created and reused.
+ * - [HADOOP-13786] A separate class, single instance in S3AFS
+ * - [HDFS-13934] Split into interface and implementation
+ * - [HADOOP-15711] Adds audit tracking; one instance per use.
+ *
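Since each helper is now bound to exactly one audit span, the expected lifecycle is one instance per operation, closed when that operation's work completes. A sketch of that lifecycle from inside S3AFileSystem, assuming the createSpan() and createWriteOperationHelper() methods used elsewhere in this patch and a placeholder destKey value:

  // One helper per span; WriteOperations extends Closeable, so
  // try-with-resources deactivates the span when the block exits.
  try (AuditSpan span = createSpan("multipart-upload", destKey, null);
       WriteOperationHelper helper = createWriteOperationHelper(span)) {
    // every request the helper issues is attributed to the span
    String uploadId = helper.initiateMultiPartUpload(destKey);
    // ... upload parts, then complete or abort the upload ...
  }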
    */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -116,22 +127,50 @@ public class WriteOperationHelper implements WriteOperations { */ private final S3AStatisticsContext statisticsContext; + /** + * Store Context; extracted from owner. + */ + private final StoreContext storeContext; + + /** + * Source of Audit spans. + */ + private final AuditSpanSource auditSpanSource; + + /** + * Audit Span. + */ + private AuditSpan auditSpan; + + /** + * Factory for AWS requests. + */ + private final RequestFactory requestFactory; + /** * Constructor. * @param owner owner FS creating the helper * @param conf Configuration object * @param statisticsContext statistics context + * @param auditSpanSource source of spans + * @param auditSpan span to activate * */ protected WriteOperationHelper(S3AFileSystem owner, Configuration conf, - S3AStatisticsContext statisticsContext) { + S3AStatisticsContext statisticsContext, + final AuditSpanSource auditSpanSource, + final AuditSpan auditSpan) { this.owner = owner; this.invoker = new Invoker(new S3ARetryPolicy(conf), this::operationRetried); this.conf = conf; this.statisticsContext = statisticsContext; - bucket = owner.getBucket(); + this.storeContext = owner.createStoreContext(); + this.bucket = owner.getBucket(); + this.auditSpanSource = auditSpanSource; + this.auditSpan = checkNotNull(auditSpan); + this.requestFactory = owner.getRequestFactory(); } /** @@ -150,6 +189,7 @@ void operationRetried(String text, Exception ex, int retries, /** * Execute a function with retry processing. + * Also activates the current span. * @param type of return value * @param action action to execute (used in error messages) * @param path path of work (used in error messages) @@ -164,10 +204,33 @@ public T retry(String action, boolean idempotent, CallableRaisingIOE operation) throws IOException { - + activateAuditSpan(); return invoker.retry(action, path, idempotent, operation); } + /** + * Get the audit span this object was created with. + * @return the audit span + */ + public AuditSpan getAuditSpan() { + return auditSpan; + } + + /** + * Activate the audit span. + * @return the span + */ + private AuditSpan activateAuditSpan() { + return auditSpan.activate(); + } + + /** + * Deactivate the audit span. + */ + private void deactivateAuditSpan() { + auditSpan.deactivate(); + } + /** * Create a {@link PutObjectRequest} request against the specific key. * @param destKey destination key @@ -176,15 +239,18 @@ public T retry(String action, * @param headers optional map of custom headers. * @return the request */ + @Retries.OnceRaw public PutObjectRequest createPutObjectRequest(String destKey, InputStream inputStream, long length, final Map headers) { + activateAuditSpan(); ObjectMetadata objectMetadata = newObjectMetadata(length); if (headers != null) { objectMetadata.setUserMetadata(headers); } - return owner.newPutObjectRequest(destKey, + return getRequestFactory().newPutObjectRequest( + destKey, objectMetadata, inputStream); } @@ -195,13 +261,16 @@ public PutObjectRequest createPutObjectRequest(String destKey, * @param sourceFile source file * @return the request */ + @Retries.OnceRaw public PutObjectRequest createPutObjectRequest(String dest, File sourceFile) { Preconditions.checkState(sourceFile.length() < Integer.MAX_VALUE, "File length is too big for a single PUT upload"); - return owner.newPutObjectRequest(dest, - newObjectMetadata((int) sourceFile.length()), - sourceFile); + activateAuditSpan(); + return getRequestFactory(). 
+ newPutObjectRequest(dest, + newObjectMetadata((int) sourceFile.length()), + sourceFile); } /** @@ -227,7 +296,7 @@ public void writeFailed(Exception ex) { * @return a new metadata instance */ public ObjectMetadata newObjectMetadata(long length) { - return owner.newObjectMetadata(length); + return getRequestFactory().newObjectMetadata(length); } /** @@ -240,15 +309,16 @@ public ObjectMetadata newObjectMetadata(long length) { @Retries.RetryTranslated public String initiateMultiPartUpload(String destKey) throws IOException { LOG.debug("Initiating Multipart upload to {}", destKey); - final InitiateMultipartUploadRequest initiateMPURequest = - new InitiateMultipartUploadRequest(bucket, - destKey, - newObjectMetadata(-1)); - initiateMPURequest.setCannedACL(owner.getCannedACL()); - owner.setOptionalMultipartUploadRequestParameters(initiateMPURequest); - - return retry("initiate MultiPartUpload", destKey, true, - () -> owner.initiateMultipartUpload(initiateMPURequest).getUploadId()); + try (AuditSpan span = activateAuditSpan()) { + return retry("initiate MultiPartUpload", destKey, true, + () -> { + final InitiateMultipartUploadRequest initiateMPURequest = + getRequestFactory().newMultipartUploadRequest( + destKey); + return owner.initiateMultipartUpload(initiateMPURequest) + .getUploadId(); + }); + } } /** @@ -278,23 +348,22 @@ private CompleteMultipartUploadResult finalizeMultipartUpload( throw new PathIOException(destKey, "No upload parts in multipart upload"); } - CompleteMultipartUploadResult uploadResult = - invoker.retry("Completing multipart upload", destKey, - true, - retrying, - () -> { - // a copy of the list is required, so that the AWS SDK doesn't - // attempt to sort an unmodifiable list. - return owner.getAmazonS3Client().completeMultipartUpload( - new CompleteMultipartUploadRequest(bucket, - destKey, - uploadId, - new ArrayList<>(partETags))); - } - ); - owner.finishedWrite(destKey, length, uploadResult.getETag(), - uploadResult.getVersionId(), operationState); - return uploadResult; + try (AuditSpan span = activateAuditSpan()) { + CompleteMultipartUploadResult uploadResult; + uploadResult = invoker.retry("Completing multipart upload", destKey, + true, + retrying, + () -> { + final CompleteMultipartUploadRequest request = + getRequestFactory().newCompleteMultipartUploadRequest( + destKey, uploadId, partETags); + return owner.getAmazonS3Client().completeMultipartUpload( + request); + }); + owner.finishedWrite(destKey, length, uploadResult.getETag(), + uploadResult.getVersionId(), operationState); + return uploadResult; + } } /** @@ -351,16 +420,17 @@ public void abortMultipartUpload(String destKey, String uploadId, destKey, true, retrying, - () -> owner.abortMultipartUpload( - destKey, - uploadId)); + withinAuditSpan(getAuditSpan(), () -> + owner.abortMultipartUpload( + destKey, uploadId))); } else { // single pass attempt. 
once("Aborting multipart upload ID " + uploadId, destKey, - () -> owner.abortMultipartUpload( - destKey, - uploadId)); + withinAuditSpan(getAuditSpan(), () -> + owner.abortMultipartUpload( + destKey, + uploadId))); } } @@ -373,7 +443,8 @@ public void abortMultipartUpload(String destKey, String uploadId, public void abortMultipartUpload(MultipartUpload upload) throws IOException { invoker.retry("Aborting multipart commit", upload.getKey(), true, - () -> owner.abortMultipartUpload(upload)); + withinAuditSpan(getAuditSpan(), + () -> owner.abortMultipartUpload(upload))); } @@ -389,7 +460,7 @@ public int abortMultipartUploadsUnderPath(String prefix) throws IOException { LOG.debug("Aborting multipart uploads under {}", prefix); int count = 0; - List multipartUploads = owner.listMultipartUploads(prefix); + List multipartUploads = listMultipartUploads(prefix); LOG.debug("Number of outstanding uploads: {}", multipartUploads.size()); for (MultipartUpload upload: multipartUploads) { try { @@ -402,6 +473,14 @@ public int abortMultipartUploadsUnderPath(String prefix) return count; } + @Override + @Retries.RetryTranslated + public List listMultipartUploads(final String prefix) + throws IOException { + activateAuditSpan(); + return owner.listMultipartUploads(prefix); + } + /** * Abort a multipart commit operation. * @param destKey destination key of ongoing operation @@ -409,6 +488,7 @@ public int abortMultipartUploadsUnderPath(String prefix) * @throws IOException on problems. * @throws FileNotFoundException if the abort ID is unknown */ + @Override @Retries.RetryTranslated public void abortMultipartCommit(String destKey, String uploadId) throws IOException { @@ -423,6 +503,7 @@ public void abortMultipartCommit(String destKey, String uploadId) * in {@code offset} and a length of block in {@code size} equal to * or less than the remaining bytes. * The part number must be less than 10000. + * Retry policy is once-translated; to much effort * @param destKey destination key of ongoing operation * @param uploadId ID of ongoing upload * @param partNumber current part number of the upload @@ -431,9 +512,11 @@ public void abortMultipartCommit(String destKey, String uploadId) * @param sourceFile optional source file. * @param offset offset in file to start reading. * @return the request. - * @throws IllegalArgumentException if the parameters are invalid -including + * @throws IllegalArgumentException if the parameters are invalid. * @throws PathIOException if the part number is out of range. 
*/ + @Override + @Retries.OnceTranslated public UploadPartRequest newUploadPartRequest( String destKey, String uploadId, @@ -441,52 +524,17 @@ public UploadPartRequest newUploadPartRequest( int size, InputStream uploadStream, File sourceFile, - Long offset) throws PathIOException { - checkNotNull(uploadId); - // exactly one source must be set; xor verifies this - checkArgument((uploadStream != null) ^ (sourceFile != null), - "Data source"); - checkArgument(size >= 0, "Invalid partition size %s", size); - checkArgument(partNumber > 0, - "partNumber must be between 1 and %s inclusive, but is %s", - DEFAULT_UPLOAD_PART_COUNT_LIMIT, partNumber); - - LOG.debug("Creating part upload request for {} #{} size {}", - uploadId, partNumber, size); - long partCountLimit = longOption(conf, - UPLOAD_PART_COUNT_LIMIT, - DEFAULT_UPLOAD_PART_COUNT_LIMIT, - 1); - if (partCountLimit != DEFAULT_UPLOAD_PART_COUNT_LIMIT) { - LOG.warn("Configuration property {} shouldn't be overridden by client", - UPLOAD_PART_COUNT_LIMIT); - } - final String pathErrorMsg = "Number of parts in multipart upload exceeded." - + " Current part count = %s, Part count limit = %s "; - if (partNumber > partCountLimit) { - throw new PathIOException(destKey, - String.format(pathErrorMsg, partNumber, partCountLimit)); - } - UploadPartRequest request = new UploadPartRequest() - .withBucketName(bucket) - .withKey(destKey) - .withUploadId(uploadId) - .withPartNumber(partNumber) - .withPartSize(size); - if (uploadStream != null) { - // there's an upload stream. Bind to it. - request.setInputStream(uploadStream); - } else { - checkArgument(sourceFile.exists(), - "Source file does not exist: %s", sourceFile); - checkArgument(offset >= 0, "Invalid offset %s", offset); - long length = sourceFile.length(); - checkArgument(offset == 0 || offset < length, - "Offset %s beyond length of file %s", offset, length); - request.setFile(sourceFile); - request.setFileOffset(offset); - } - return request; + Long offset) throws IOException { + return once("upload part request", destKey, + withinAuditSpan(getAuditSpan(), () -> + getRequestFactory().newUploadPartRequest( + destKey, + uploadId, + partNumber, + size, + uploadStream, + sourceFile, + offset))); } /** @@ -514,7 +562,8 @@ public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws IOException { return retry("Writing Object", putObjectRequest.getKey(), true, - () -> owner.putObjectDirect(putObjectRequest)); + withinAuditSpan(getAuditSpan(), () -> + owner.putObjectDirect(putObjectRequest))); } /** @@ -529,7 +578,8 @@ public UploadResult uploadObject(PutObjectRequest putObjectRequest) // no retry; rely on xfer manager logic return retry("Writing Object", putObjectRequest.getKey(), true, - () -> owner.executePut(putObjectRequest, null)); + withinAuditSpan(getAuditSpan(), () -> + owner.executePut(putObjectRequest, null))); } /** @@ -543,13 +593,12 @@ public UploadResult uploadObject(PutObjectRequest putObjectRequest) public void revertCommit(String destKey, @Nullable BulkOperationState operationState) throws IOException { once("revert commit", destKey, - () -> { + withinAuditSpan(getAuditSpan(), () -> { Path destPath = owner.keyToQualifiedPath(destKey); owner.deleteObjectAtPath(destPath, destKey, true, operationState); owner.maybeCreateFakeParentDirectory(destPath); - } - ); + })); } /** @@ -620,10 +669,11 @@ public BulkOperationState initiateOperation(final Path path, public UploadPartResult uploadPart(UploadPartRequest request) throws IOException { return retry("upload part #" + 
request.getPartNumber() - + " upload ID "+ request.getUploadId(), + + " upload ID " + request.getUploadId(), request.getKey(), true, - () -> owner.uploadPart(request)); + withinAuditSpan(getAuditSpan(), + () -> owner.uploadPart(request))); } /** @@ -642,10 +692,10 @@ public Configuration getConf() { * @return the request */ public SelectObjectContentRequest newSelectRequest(Path path) { - SelectObjectContentRequest request = new SelectObjectContentRequest(); - request.setBucketName(bucket); - request.setKey(owner.pathToKey(path)); - return request; + try (AuditSpan span = getAuditSpan()) { + return getRequestFactory().newSelectRequest( + storeContext.pathToKey(path)); + } } /** @@ -664,6 +714,8 @@ public SelectObjectContentResult select( final SelectObjectContentRequest request, final String action) throws IOException { + // no setting of span here as the select binding is (statically) created + // without any span. String bucketName = request.getBucketName(); Preconditions.checkArgument(bucket.equals(bucketName), "wrong bucket: %s", bucketName); @@ -676,7 +728,7 @@ public SelectObjectContentResult select( action, source.toString(), true, - () -> { + withinAuditSpan(getAuditSpan(), () -> { try (DurationInfo ignored = new DurationInfo(LOG, "S3 Select operation")) { try { @@ -691,11 +743,35 @@ public SelectObjectContentResult select( throw e; } } - }); + })); + } + + @Override + public AuditSpan createSpan(final String operation, + @Nullable final String path1, + @Nullable final String path2) throws IOException { + return auditSpanSource.createSpan(operation, path1, path2); } @Override public void incrementWriteOperations() { owner.incrementWriteOperations(); } + + /** + * Deactivate the audit span. + */ + @Override + public void close() throws IOException { + deactivateAuditSpan(); + } + + /** + * Get the request factory which uses this store's audit span. + * @return the request factory. + */ + public RequestFactory getRequestFactory() { + return requestFactory; + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java index 0a8150ce34dbb..9400ef2c490fe 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.s3a; import javax.annotation.Nullable; +import java.io.Closeable; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -43,6 +44,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.util.functional.CallableRaisingIOE; /** @@ -54,7 +56,7 @@ * use `WriteOperationHelper` directly. * @since Hadoop 3.3.0 */ -public interface WriteOperations { +public interface WriteOperations extends AuditSpanSource, Closeable { /** * Execute a function with retry processing. @@ -184,6 +186,16 @@ void abortMultipartUpload(MultipartUpload upload) int abortMultipartUploadsUnderPath(String prefix) throws IOException; + /** + * Abort multipart uploads under a path: limited to the first + * few hundred. + * @param prefix prefix for uploads to abort + * @return a count of aborts + * @throws IOException trouble; FileNotFoundExceptions are swallowed. 
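The listMultipartUploads() method added above lists the in-progress uploads under a prefix (activating the helper's audit span first) and pairs naturally with the existing abortMultipartUpload(MultipartUpload) call. A minimal sketch, not part of the patch, of how a caller might combine the two; how the WriteOperations instance is obtained is omitted and the class/method names are hypothetical:

    import java.io.IOException;
    import java.util.List;
    import com.amazonaws.services.s3.model.MultipartUpload;
    import org.apache.hadoop.fs.s3a.WriteOperations;

    /** Sketch only: abort every pending multipart upload below a prefix. */
    final class PendingUploadCleaner {
      private PendingUploadCleaner() {
      }

      static int abortAllUnder(WriteOperations writeOps, String prefix)
          throws IOException {
        // listMultipartUploads() activates the audit span before delegating.
        List<MultipartUpload> uploads = writeOps.listMultipartUploads(prefix);
        int aborted = 0;
        for (MultipartUpload upload : uploads) {
          writeOps.abortMultipartUpload(upload);
          aborted++;
        }
        return aborted;
      }
    }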
+ */ + List listMultipartUploads(String prefix) + throws IOException; + /** * Abort a multipart commit operation. * @param destKey destination key of ongoing operation @@ -210,7 +222,7 @@ void abortMultipartCommit(String destKey, String uploadId) * @param sourceFile optional source file. * @param offset offset in file to start reading. * @return the request. - * @throws IllegalArgumentException if the parameters are invalid -including + * @throws IllegalArgumentException if the parameters are invalid * @throws PathIOException if the part number is out of range. */ UploadPartRequest newUploadPartRequest( @@ -220,7 +232,7 @@ UploadPartRequest newUploadPartRequest( int size, InputStream uploadStream, File sourceFile, - Long offset) throws PathIOException; + Long offset) throws IOException; /** * PUT an object directly (i.e. not via the transfer manager). diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java new file mode 100644 index 0000000000000..9bffcc90d0bd4 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java @@ -0,0 +1,296 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.api; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.InputStream; +import java.util.List; +import java.util.Optional; + +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CannedAccessControlList; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CopyObjectRequest; +import com.amazonaws.services.s3.model.DeleteObjectRequest; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsV2Request; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; +import com.amazonaws.services.s3.model.SSECustomerKey; +import com.amazonaws.services.s3.model.SelectObjectContentRequest; +import com.amazonaws.services.s3.model.UploadPartRequest; + +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; +import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; + +/** + * Factory for S3 objects. + * + * This is where the owner FS's {@code prepareRequest()} + * callback is invoked to mark up a request for this span. + * + * All AWS request objects MUST BE created through this, in + * the active audit span. + * The standard implementation provides a callback for the S3AFS or + * tests to be invoked to prepare each request. + * Such callbacks SHOULD NOT raise exceptions other + * than argument validation exceptions. + * as there are no guarantees how they are processed. + * That is: no guarantees of retry or translation. + */ +public interface RequestFactory { + + /** + * Set the encryption secrets for all subsequent requests. + * @param secrets encryption secrets. + */ + void setEncryptionSecrets(EncryptionSecrets secrets); + + /** + * Get the canned ACL of this FS. + * @return an ACL, if any + */ + CannedAccessControlList getCannedACL(); + + /** + * Create the AWS SDK structure used to configure SSE, + * if the encryption secrets contain the information/settings for this. + * @return an optional set of KMS Key settings + */ + Optional generateSSEAwsKeyParams(); + + /** + * Create the SSE-C structure for the AWS SDK, if the encryption secrets + * contain the information/settings for this. + * This will contain a secret extracted from the bucket/configuration. + * @return an optional customer key. + */ + Optional generateSSECustomerKey(); + + /** + * Get the encryption algorithm of this endpoint. + * @return the encryption algorithm. + */ + S3AEncryptionMethods getServerSideEncryptionAlgorithm(); + + /** + * Create a new object metadata instance. + * Any standard metadata headers are added here, for example: + * encryption. 
+ * + * @param length length of data to set in header; Ignored if negative + * @return a new metadata instance + */ + ObjectMetadata newObjectMetadata(long length); + + /** + * Create a copy request. + * This includes the work of copying the relevant parts + * of the metadata from the source + * @param srcKey source + * @param dstKey destination + * @param srcom source object metadata. + * @return the request + */ + CopyObjectRequest newCopyObjectRequest(String srcKey, + String dstKey, + ObjectMetadata srcom); + + /** + * Create a putObject request. + * Adds the ACL and metadata + * @param key key of object + * @param metadata metadata header + * @param srcfile source file + * @return the request + */ + PutObjectRequest newPutObjectRequest(String key, + ObjectMetadata metadata, File srcfile); + + /** + * Create a {@link PutObjectRequest} request. + * The metadata is assumed to have been configured with the size of the + * operation. + * @param key key of object + * @param metadata metadata header + * @param inputStream source data. + * @return the request + */ + PutObjectRequest newPutObjectRequest(String key, + ObjectMetadata metadata, + InputStream inputStream); + + /** + * Create a {@link PutObjectRequest} request for creating + * an empty directory. + * + * @param directory destination directory. + * @return request for a zero byte upload. + */ + PutObjectRequest newDirectoryMarkerRequest(String directory); + + /** + * List all multipart uploads under a prefix. + * @param prefix prefix to list under + * @return the request. + */ + ListMultipartUploadsRequest newListMultipartUploadsRequest( + @Nullable String prefix); + + /** + * Abort a multipart upload. + * @param destKey destination object key + * @param uploadId ID of initiated upload + * @return the request. + */ + AbortMultipartUploadRequest newAbortMultipartUploadRequest( + String destKey, + String uploadId); + + /** + * Start a multipart upload. + * @param destKey destination object key + * @return the request. + */ + InitiateMultipartUploadRequest newMultipartUploadRequest( + String destKey); + + /** + * Complete a multipart upload. + * @param destKey destination object key + * @param uploadId ID of initiated upload + * @param partETags ordered list of etags + * @return the request. + */ + CompleteMultipartUploadRequest newCompleteMultipartUploadRequest( + String destKey, + String uploadId, + List partETags); + + /** + * Create a HEAD request. + * @param key key, may have trailing / + * @return the request. + */ + GetObjectMetadataRequest newGetObjectMetadataRequest(String key); + + /** + * Create a GET request. + * @param key object key + * @return the request. + */ + GetObjectRequest newGetObjectRequest(String key); + + /** + * Create and initialize a part request of a multipart upload. + * Exactly one of: {@code uploadStream} or {@code sourceFile} + * must be specified. + * A subset of the file may be posted, by providing the starting point + * in {@code offset} and a length of block in {@code size} equal to + * or less than the remaining bytes. + * @param destKey destination key of ongoing operation + * @param uploadId ID of ongoing upload + * @param partNumber current part number of the upload + * @param size amount of data + * @param uploadStream source of data to upload + * @param sourceFile optional source file. + * @param offset offset in file to start reading. + * @return the request. + * @throws PathIOException if the part number is out of range. 
+ */ + UploadPartRequest newUploadPartRequest( + String destKey, + String uploadId, + int partNumber, + int size, + InputStream uploadStream, + File sourceFile, + long offset) throws PathIOException; + + /** + * Create a S3 Select request for the destination object. + * This does not build the query. + * @param key object key + * @return the request + */ + SelectObjectContentRequest newSelectRequest(String key); + + /** + * Create the (legacy) V1 list request. + * @param key key to list under + * @param delimiter delimiter for keys + * @param maxKeys maximum number in a list page. + * @return the request + */ + ListObjectsRequest newListObjectsV1Request(String key, + String delimiter, + int maxKeys); + + /** + * Create the next V1 page list request, following + * on from the previous response. + * @param prev previous response + * @return the request + */ + + ListNextBatchOfObjectsRequest newListNextBatchOfObjectsRequest( + ObjectListing prev); + + /** + * Create a V2 list request. + * This will be recycled for any subsequent requests. + * @param key key to list under + * @param delimiter delimiter for keys + * @param maxKeys maximum number in a list page. + * @return the request + */ + ListObjectsV2Request newListObjectsV2Request(String key, + String delimiter, + int maxKeys); + + /** + * Create a request to delete a single object. + * @param key object to delete + * @return the request + */ + DeleteObjectRequest newDeleteObjectRequest(String key); + + /** + * Bulk delete request. + * @param keysToDelete list of keys to delete. + * @param quiet should a bulk query be quiet, or should its result list + * all deleted keys? + * @return the request + */ + DeleteObjectsRequest newBulkDeleteRequest( + List keysToDelete, + boolean quiet); + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/package-info.java new file mode 100644 index 0000000000000..625c8a3fca2cc --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/package-info.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Where the interfaces for classes implemented in {@code o.a.h.fs.s3a.impl} + * should go. This is to allow for extension points to use these interfaces + * without having any java module access to the .impl package. + * + * This is public for S3A extension points, however there are no + * guarantees of stability -changes may break things, possibly + * unintentionally. 
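To show how the RequestFactory interface above is meant to be consumed: callers build metadata and request objects entirely through the factory, so ACLs and encryption settings are applied in one place. A hedged sketch with hypothetical class and method names:

    import java.io.File;
    import com.amazonaws.services.s3.model.ObjectMetadata;
    import com.amazonaws.services.s3.model.PutObjectRequest;
    import org.apache.hadoop.fs.s3a.api.RequestFactory;

    /** Sketch only: build a PUT request purely through the factory. */
    final class PutRequestSketch {
      private PutRequestSketch() {
      }

      static PutObjectRequest filePut(RequestFactory factory, String key, File source) {
        // the metadata carries the content length plus any encryption headers
        ObjectMetadata metadata = factory.newObjectMetadata(source.length());
        return factory.newPutObjectRequest(key, metadata, source);
      }
    }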
+ */ + +@InterfaceAudience.LimitedPrivate("extensions") +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSAuditEventCallbacks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSAuditEventCallbacks.java new file mode 100644 index 0000000000000..8134d5cea942e --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSAuditEventCallbacks.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.Request; +import com.amazonaws.Response; +import com.amazonaws.SdkBaseException; +import com.amazonaws.handlers.HandlerAfterAttemptContext; +import com.amazonaws.handlers.HandlerBeforeAttemptContext; +import com.amazonaws.http.HttpResponse; + +import org.apache.hadoop.fs.s3a.Retries; + +/** + * Callbacks for audit spans. This is implemented + * in the span manager as well as individual audit spans. + * If any of the code in a callback raises an InterruptedException, + * it must be caught and {@code Thread.interrupt()} called to + * redeclare the thread as interrupted. The AWS SDK will + * detect this and raise an exception. + * + * Look at the documentation for + * {@code com.amazonaws.handlers.IRequestHandler2} for details + * on the callbacks. + */ +public interface AWSAuditEventCallbacks { + + /** + * Return a span ID which must be unique for all spans within + * everywhere. That effectively means part of the + * span SHOULD be derived from a UUID. + * Callers MUST NOT make any assumptions about the actual + * contents or structure of this string other than the + * uniqueness. + * @return a non-empty string + */ + String getSpanId(); + + /** + * Get the name of the operation. + * @return the operation name. + */ + String getOperationName(); + + /** + * Callback when a request is created in the S3A code. + * This is called in {@code RequestFactoryImpl} after + * each request is created. + * It is not invoked on any AWS requests created in the SDK. + * Avoid raising exceptions or talking to any remote service; + * this callback is for annotation rather than validation. + * @param request request request. + * @param type of request + * @return the request, possibly modified. + */ + default T requestCreated(T request) { + return request; + } + + /** + * Preflight preparation of AWS request. + * @param request request + * @param type of request + * @return an updated request. 
+ * @throws AuditFailureException for generic audit failures + * @throws SdkBaseException for other reasons. + */ + @Retries.OnceRaw + default T beforeExecution(T request) + throws AuditFailureException, SdkBaseException { + return request; + } + + /** + * Callback after S3 responded to a request. + * @param request request + * @param response response. + * @throws AuditFailureException for generic audit failures + * @throws SdkBaseException for other reasons. + */ + default void afterResponse(Request request, + Response response) + throws AuditFailureException, SdkBaseException { + } + + /** + * Callback after a request resulted in an error. + * @param request request + * @param response response. + * @param exception exception raised. + * @throws AuditFailureException for generic audit failures + * @throws SdkBaseException for other reasons. + */ + default void afterError(Request request, + Response response, + Exception exception) + throws AuditFailureException, SdkBaseException { + } + + /** + * Request before marshalling. + * @param request request + * @return possibly modified request. + */ + default AmazonWebServiceRequest beforeMarshalling( + AmazonWebServiceRequest request) { + return request; + } + + /** + * Request before marshalling. + * @param request request + */ + default void beforeRequest(Request request) { + } + + /** + * Before any attempt is made. + * @param context full context, including the request. + */ + default void beforeAttempt(HandlerBeforeAttemptContext context) { + } + + /** + * After any attempt is made. + * @param context full context, including the request. + */ + default void afterAttempt( + HandlerAfterAttemptContext context) { + } + + /** + * Before unmarshalling the response. + * @param request request made. + * @param httpResponse response received + * @return updated response. + */ + default HttpResponse beforeUnmarshalling( + final Request request, + final HttpResponse httpResponse) { + return httpResponse; + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSRequestAnalyzer.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSRequestAnalyzer.java new file mode 100644 index 0000000000000..a5a6dbc84797f --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AWSRequestAnalyzer.java @@ -0,0 +1,296 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
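Because every SDK lifecycle callback in AWSAuditEventCallbacks above carries a default no-op implementation, only getSpanId() and getOperationName() must be supplied. A minimal sketch of an implementation (hypothetical class name; deriving the span ID from a UUID follows the interface's own guidance):

    import java.util.UUID;
    import org.apache.hadoop.fs.s3a.audit.AWSAuditEventCallbacks;

    /** Sketch only: the smallest viable callback implementation. */
    class MinimalCallbacks implements AWSAuditEventCallbacks {

      /** Span IDs must be unique, so derive them from a UUID. */
      private final String spanId = UUID.randomUUID().toString();

      @Override
      public String getSpanId() {
        return spanId;
      }

      @Override
      public String getOperationName() {
        return "example-operation";
      }
    }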
+ */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.util.List; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CopyPartRequest; +import com.amazonaws.services.s3.model.DeleteObjectRequest; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.GetBucketLocationRequest; +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsV2Request; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.SelectObjectContentRequest; +import com.amazonaws.services.s3.model.UploadPartRequest; + +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_HTTP_GET_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_HTTP_HEAD_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.MULTIPART_UPLOAD_ABORTED; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.MULTIPART_UPLOAD_COMPLETED; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.MULTIPART_UPLOAD_LIST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.MULTIPART_UPLOAD_PART_PUT; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.MULTIPART_UPLOAD_STARTED; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_BULK_DELETE_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_DELETE_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_PUT_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_SELECT_REQUESTS; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.STORE_EXISTS_PROBE; + +/** + * Extract information from a request. + * Intended for reporting and error logs. + */ +public class AWSRequestAnalyzer { + + /** + * Given an AWS request, try to analyze it to operation, + * read/write and path. + * @param request request. + * @return information about the request. + * @param type of request. + */ + public RequestInfo analyze(T request) { + + // this is where Scala's case statement would massively + // simplify life. + // Please Keep in Alphabetical Order. 
+ if (request instanceof AbortMultipartUploadRequest) { + return writing(MULTIPART_UPLOAD_ABORTED, + ((AbortMultipartUploadRequest) request).getKey(), + 0); + } else if (request instanceof CompleteMultipartUploadRequest) { + CompleteMultipartUploadRequest r + = (CompleteMultipartUploadRequest) request; + return writing(MULTIPART_UPLOAD_COMPLETED, + r.getKey(), + r.getPartETags().size()); + } else if (request instanceof DeleteObjectRequest) { + // DeleteObject: single object + return writing(OBJECT_DELETE_REQUEST, + ((DeleteObjectRequest) request).getKey(), + 1); + } else if (request instanceof DeleteObjectsRequest) { + // DeleteObjects: bulk delete + // use first key as the path + DeleteObjectsRequest r = (DeleteObjectsRequest) request; + List keys + = r.getKeys(); + return writing(OBJECT_BULK_DELETE_REQUEST, + keys.isEmpty() ? null : keys.get(0).getKey(), + keys.size()); + } else if (request instanceof GetBucketLocationRequest) { + GetBucketLocationRequest r = (GetBucketLocationRequest) request; + return reading(STORE_EXISTS_PROBE, + r.getBucketName(), + 0); + } else if (request instanceof GetObjectMetadataRequest) { + return reading(ACTION_HTTP_HEAD_REQUEST, + ((GetObjectMetadataRequest) request).getKey(), 0); + } else if (request instanceof GetObjectRequest) { + GetObjectRequest r = (GetObjectRequest) request; + long[] range = r.getRange(); + long size = range == null + ? -1 + : range[1] - range[0]; + return reading(ACTION_HTTP_GET_REQUEST, + r.getKey(), + size); + } else if (request instanceof InitiateMultipartUploadRequest) { + return writing(MULTIPART_UPLOAD_STARTED, + ((InitiateMultipartUploadRequest) request).getKey(), + 0); + } else if (request instanceof ListMultipartUploadsRequest) { + ListMultipartUploadsRequest r + = (ListMultipartUploadsRequest) request; + return reading(MULTIPART_UPLOAD_LIST, + r.getPrefix(), + r.getMaxUploads()); + } else if (request instanceof ListObjectsRequest) { + ListObjectsRequest r = (ListObjectsRequest) request; + return reading(OBJECT_LIST_REQUEST, + r.getPrefix(), + r.getMaxKeys()); + } else if (request instanceof ListNextBatchOfObjectsRequest) { + ListNextBatchOfObjectsRequest r = (ListNextBatchOfObjectsRequest) request; + ObjectListing l = r.getPreviousObjectListing(); + String prefix = ""; + int size = 0; + if (l != null) { + prefix = l.getPrefix(); + size = l.getMaxKeys(); + } + return reading(OBJECT_LIST_REQUEST, + prefix, + size); + } else if (request instanceof ListObjectsV2Request) { + ListObjectsV2Request r = (ListObjectsV2Request) request; + return reading(OBJECT_LIST_REQUEST, + r.getPrefix(), + r.getMaxKeys()); + } else if (request instanceof PutObjectRequest) { + PutObjectRequest r = (PutObjectRequest) request; + return writing(OBJECT_PUT_REQUEST, + r.getKey(), + 0); + } else if (request instanceof SelectObjectContentRequest) { + SelectObjectContentRequest r = + (SelectObjectContentRequest) request; + return reading(OBJECT_SELECT_REQUESTS, + r.getKey(), + 1); + } else if (request instanceof UploadPartRequest) { + UploadPartRequest r = (UploadPartRequest) request; + return writing(MULTIPART_UPLOAD_PART_PUT, + r.getKey(), + r.getPartSize()); + } + // no explicit support, return classname + return writing(request.getClass().getName(), null, 0); + } + + /** + * A request. + * @param verb verb + * @param mutating does this update the store + * @param key object/prefix, etc. 
+ * @param size nullable size + * @return request info + */ + private RequestInfo request(final String verb, + final boolean mutating, + final String key, + final Number size) { + return new RequestInfo(verb, mutating, key, size); + } + + /** + * A read request. + * @param verb verb + * @param key object/prefix, etc. + * @param size nullable size + * @return request info + */ + private RequestInfo reading(final String verb, + final String key, final Number size) { + return request(verb, false, key, size); + } + + /** + * A write request of some form. + * @param verb verb + * @param key object/prefix, etc. + * @param size nullable size + * @return request info + */ + private RequestInfo writing(final String verb, + final String key, final Number size) { + return request(verb, true, key, size); + } + + /** + * Predicate which returns true if the request is of a kind which + * could be outside a span because of how the AWS SDK generates them. + * @param request request + * @return true if the transfer manager creates them. + */ + public static final boolean + isRequestNotAlwaysInSpan(final Object request) { + return request instanceof CopyPartRequest + || request instanceof CompleteMultipartUploadRequest + || request instanceof GetBucketLocationRequest; + } + + /** + * Info about a request. + */ + public static final class RequestInfo { + + /** + * Verb. + */ + private String verb; + + /** + * Is this a mutating call? + */ + private boolean mutating; + + /** + * Key if there is one; maybe first key in a list. + */ + private String key; + + /** + * Size, where the meaning of size depends on the request. + */ + private long size; + + /** + * Construct. + * @param verb operation/classname, etc. + * @param mutating does this update S3 State. + * @param key key/path/bucket operated on. + * @param size size of request (bytes, elements, limit...). Nullable. + */ + private RequestInfo(final String verb, + final boolean mutating, + final String key, + final Number size) { + this.verb = verb; + this.mutating = mutating; + this.key = key; + this.size = toSafeLong(size); + } + + public String getVerb() { + return verb; + } + + public boolean isMutating() { + return mutating; + } + + public String getKey() { + return key; + } + + public long getSize() { + return size; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "{"); + sb.append(verb); + if (key != null) { + sb.append(" '").append(key).append('\''); + } + sb.append(" size=").append(size); + sb.append(", mutating=").append(mutating); + sb.append('}'); + return sb.toString(); + } + } + + private static long toSafeLong(final Number size) { + return size != null ? size.longValue() : 0; + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditFailureException.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditFailureException.java new file mode 100644 index 0000000000000..5cd2b9647d834 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditFailureException.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
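The RequestInfo returned by AWSRequestAnalyzer.analyze() above is intended for reporting and error logs; its toString() reports the verb, the key, the size and whether the call mutates the store. A small usage sketch with hypothetical names:

    import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
    import org.apache.hadoop.fs.s3a.audit.AWSRequestAnalyzer;

    /** Sketch only: classify a request before logging it. */
    final class RequestProbe {
      private RequestProbe() {
      }

      static String describe(GetObjectMetadataRequest request) {
        // analyze() maps the request class to a verb such as a HEAD probe
        return new AWSRequestAnalyzer().analyze(request).toString();
      }
    }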
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import org.apache.hadoop.fs.s3a.CredentialInitializationException; + +/** + * This is in the AWS exception tree so that exceptions raised in the + * AWS SDK are correctly reported up. + * It is a subclass of {@link CredentialInitializationException} + * so that + * {@code S3AUtils.translateException()} recognizes these exceptions + * and converts them to AccessDeniedException. + */ +public class AuditFailureException extends CredentialInitializationException { + + public AuditFailureException(final String message, final Throwable t) { + super(message, t); + } + + public AuditFailureException(final String message) { + super(message); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditIntegration.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditIntegration.java new file mode 100644 index 0000000000000..7b9e66901ec4c --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditIntegration.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + +import com.amazonaws.HandlerContextAware; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.audit.impl.ActiveAuditManagerS3A; +import org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor; +import org.apache.hadoop.fs.s3a.audit.impl.NoopAuditManagerS3A; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.s3a.audit.impl.S3AInternalAuditConstants.AUDIT_SPAN_HANDLER_CONTEXT; + +/** + * Support for integrating auditing within the S3A code. + */ +public final class AuditIntegration { + + /** + * Logging. + */ + private static final Logger LOG = + LoggerFactory.getLogger(AuditIntegration.class); + + private AuditIntegration() { + } + + /** + * Create and start an audit manager. + * @param conf configuration + * @param iostatistics IOStatistics source. + * @return audit manager. 
+ */ + public static AuditManagerS3A createAndStartAuditManager( + Configuration conf, + IOStatisticsStore iostatistics) { + ActiveAuditManagerS3A auditManager = new ActiveAuditManagerS3A( + requireNonNull(iostatistics)); + auditManager.init(conf); + auditManager.start(); + LOG.debug("Started Audit Manager {}", auditManager); + return auditManager; + } + + /** + * Return a stub audit manager. + * @return an audit manager. + */ + public static AuditManagerS3A stubAuditManager() { + return new NoopAuditManagerS3A(); + } + + /** + * Create and initialize an audit service. + * The service start operation is not called: that is left to + * the caller. + * @param conf configuration to read the key from and to use to init + * the service. + * @param key key containing the classname + * @param options options to initialize with. + * @return instantiated class. + * @throws IOException failure to initialise. + */ + public static OperationAuditor createAndInitAuditor( + Configuration conf, + String key, + OperationAuditorOptions options) throws IOException { + final Class auditClassname + = conf.getClass( + key, + LoggingAuditor.class, + OperationAuditor.class); + try { + LOG.debug("Auditor class is {}", auditClassname); + final Constructor constructor + = auditClassname.getConstructor(); + final OperationAuditor instance = constructor.newInstance(); + instance.init(options); + return instance; + } catch (NoSuchMethodException | InstantiationException + | RuntimeException + | IllegalAccessException | InvocationTargetException e) { + throw new IOException("Failed to instantiate class " + + auditClassname + + " defined in " + key + + ": " + e, + e); + } + } + + /** + * Get the span from a handler context. + * @param request request + * @param type of request. + * @return the span callbacks or null + */ + public static AWSAuditEventCallbacks + retrieveAttachedSpan(final T request) { + return request.getHandlerContext(AUDIT_SPAN_HANDLER_CONTEXT); + } + + /** + * Attach a span to a handler context. + * @param request request + * @param span span to attach + * @param type of request. + */ + public static void attachSpanToRequest( + final T request, final AWSAuditEventCallbacks span) { + request.addHandlerContext(AUDIT_SPAN_HANDLER_CONTEXT, span); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditManagerS3A.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditManagerS3A.java new file mode 100644 index 0000000000000..c1302d57454b1 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditManagerS3A.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
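AuditIntegration's attach/retrieve helpers above carry a span across the AWS SDK boundary by stashing it in the request's handler context. A sketch of that hand-off (hypothetical wrapper class; typing the request as HandlerContextAware follows the import in the file above):

    import com.amazonaws.HandlerContextAware;
    import org.apache.hadoop.fs.s3a.audit.AWSAuditEventCallbacks;
    import org.apache.hadoop.fs.s3a.audit.AuditIntegration;

    /** Sketch only: hand a span to the SDK and read it back later. */
    final class SpanHandoff {
      private SpanHandoff() {
      }

      static void tag(HandlerContextAware request, AWSAuditEventCallbacks span) {
        AuditIntegration.attachSpanToRequest(request, span);
      }

      static AWSAuditEventCallbacks lookup(HandlerContextAware request) {
        // returns null when no span was attached
        return AuditIntegration.retrieveAttachedSpan(request);
      }
    }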
+ */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.IOException; +import java.util.List; + +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.services.s3.transfer.internal.TransferStateChangeListener; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.store.audit.ActiveThreadSpanSource; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; +import org.apache.hadoop.service.Service; + +/** + * Interface for Audit Managers auditing operations through the + * AWS libraries. + * The Audit Manager is the binding between S3AFS and the instantiated + * plugin point -it adds: + *
+ * <ol> + *   <li>per-thread tracking of audit spans</li> + *   <li>The wiring up to the AWS SDK</li> + *   <li>State change tracking for copy operations (does not address issue)</li> + * </ol>
    + */ +@InterfaceAudience.Private +public interface AuditManagerS3A extends Service, + AuditSpanSource, + AWSAuditEventCallbacks, + ActiveThreadSpanSource { + + /** + * Get the auditor; valid once initialized. + * @return the auditor. + */ + OperationAuditor getAuditor(); + + /** + * Create the request handler(s) for this audit service. + * The list returned is mutable; new handlers may be added. + * @return list of handlers for the SDK. + * @throws IOException failure. + */ + List createRequestHandlers() throws IOException; + + /** + * Return a transfer state change callback which + * fixes the active span context to be that in which + * the state change listener was created. + * This can be used to audit the creation of the multipart + * upload initiation request which the transfer manager + * makes when a file to be copied is split up. + * This must be invoked/used within the active span. + * @return a state change listener. + */ + TransferStateChangeListener createStateChangeListener(); + + /** + * Check for permission to access a path. + * The path is fully qualified and the status is the + * status of the path. + * This is called from the {@code FileSystem.access()} command + * and is a soft permission check used by Hive. + * @param path path to check + * @param status status of the path. + * @param mode access mode. + * @return true if access is allowed. + * @throws IOException failure + */ + boolean checkAccess(Path path, S3AFileStatus status, FsAction mode) + throws IOException; +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditSpanS3A.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditSpanS3A.java new file mode 100644 index 0000000000000..3e4e70918ac36 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/AuditSpanS3A.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import org.apache.hadoop.fs.store.audit.AuditSpan; + +/** + * An Audit Span with S3A integration, specifically + * callbacks from the AWS client SDK. + */ +public interface AuditSpanS3A extends AuditSpan, + AWSAuditEventCallbacks { +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditor.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditor.java new file mode 100644 index 0000000000000..672bcdf7f9c77 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditor.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; +import org.apache.hadoop.service.Service; + +/** + * Interfaces for audit services to implement. + */ +public interface OperationAuditor extends Service, + IOStatisticsSource, AuditSpanSource { + + /** + * Initialize. + * The base class will call {@link Service#init(Configuration)}. + * @param options options to initialize with. + */ + void init(OperationAuditorOptions options); + + /** + * Get the unbonded span to use after deactivating an active + * span. + * @return a span. + */ + AuditSpanS3A getUnbondedSpan(); + + /** + * Check for permission to access a path. + * The path is fully qualified and the status is the + * status of the path. + * This is called from the {@code FileSystem.access()} command + * and is a soft permission check used by Hive. + * @param path path to check + * @param status status of the path. + * @param mode access mode. + * @return true if access is allowed. + * @throws IOException failure + */ + default boolean checkAccess(Path path, S3AFileStatus status, FsAction mode) + throws IOException { + return true; + } + + /** + * Get the Auditor ID. + * @return ID + */ + String getAuditorId(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditorOptions.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditorOptions.java new file mode 100644 index 0000000000000..40d1b23651b88 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/OperationAuditorOptions.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.audit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +/** + * Options for the {@link OperationAuditor}. + * Done as a builder and passed in so + * that if it is extended, external auditors will still link. + */ +public final class OperationAuditorOptions { + + private Configuration configuration; + private IOStatisticsStore ioStatisticsStore; + + + private OperationAuditorOptions() { + } + + public Configuration getConfiguration() { + return configuration; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public OperationAuditorOptions withConfiguration(final Configuration value) { + configuration = value; + return this; + } + + public IOStatisticsStore getIoStatisticsStore() { + return ioStatisticsStore; + } + + /** + * Set builder value. + * @param value new value + * @return the builder + */ + public OperationAuditorOptions withIoStatisticsStore( + final IOStatisticsStore value) { + ioStatisticsStore = value; + return this; + } + + /** + * Create one. + * @return a new option instance + */ + public static OperationAuditorOptions builder() { + return new OperationAuditorOptions(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3AAuditConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3AAuditConstants.java new file mode 100644 index 0000000000000..2b08893df6e82 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3AAuditConstants.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.LimitedPrivate("S3A auditing extensions") +@InterfaceStability.Unstable +public final class S3AAuditConstants { + + private S3AAuditConstants() { + } + + /** + * What to look for in logs for ops outside any audit. + * {@value}. + */ + public static final String UNAUDITED_OPERATION = "unaudited operation"; + + /** + * Name of class used for audit logs: {@value}. + */ + public static final String AUDIT_SERVICE_CLASSNAME = + "fs.s3a.audit.service.classname"; + + /** + * Classname of the logging auditor: {@value}. + */ + public static final String LOGGING_AUDIT_SERVICE = + "org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor"; + + /** + * Classname of the No-op auditor: {@value}. + */ + public static final String NOOP_AUDIT_SERVICE = + "org.apache.hadoop.fs.s3a.audit.impl.NoopAuditor"; + + /** + * List of extra AWS SDK request handlers: {@value}. 
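Putting OperationAuditorOptions and AuditIntegration together: a sketch of bootstrapping an auditor from configuration. createAndInitAuditor() reads the classname from fs.s3a.audit.service.classname (defaulting to the logging auditor) and initializes, but does not start, the service; the class and method names below are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.audit.AuditIntegration;
    import org.apache.hadoop.fs.s3a.audit.OperationAuditor;
    import org.apache.hadoop.fs.s3a.audit.OperationAuditorOptions;
    import org.apache.hadoop.fs.s3a.audit.S3AAuditConstants;
    import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

    /** Sketch only: create and initialize an auditor; starting it is the caller's job. */
    final class AuditorBootstrap {
      private AuditorBootstrap() {
      }

      static OperationAuditor createAuditor(Configuration conf, IOStatisticsStore stats)
          throws IOException {
        OperationAuditorOptions options = OperationAuditorOptions.builder()
            .withConfiguration(conf)
            .withIoStatisticsStore(stats);
        return AuditIntegration.createAndInitAuditor(conf,
            S3AAuditConstants.AUDIT_SERVICE_CLASSNAME, options);
      }
    }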
+ * These are added to the SDK request chain after + * any audit service. + */ + public static final String AUDIT_REQUEST_HANDLERS = + "fs.s3a.audit.request.handlers"; + + /** + * Should operations outside spans be rejected? + * This is for testing coverage of the span code; if used + * in production there's a risk of unexpected failures. + * {@value}. + */ + public static final String REJECT_OUT_OF_SPAN_OPERATIONS + = "fs.s3a.audit.reject.out.of.span.operations"; + + /** + * Should the logging auditor add the HTTP Referrer header? + * {@value}. + */ + public static final String REFERRER_HEADER_ENABLED + = "fs.s3a.audit.referrer.enabled"; + + /** + * Should the logging auditor add the HTTP Referrer header? + * Default value: {@value}. + */ + public static final boolean REFERRER_HEADER_ENABLED_DEFAULT + = true; + + /** + * List of audit fields to strip from referrer headers. + * {@value}. + */ + public static final String REFERRER_HEADER_FILTER + = "fs.s3a.audit.referrer.filter"; + + /** + * Span name used during initialization. + */ + public static final String INITIALIZE_SPAN = "initialize"; + + /** + * Operation name for any operation outside of an explicit + * span. + */ + public static final String OUTSIDE_SPAN = + "outside-span"; +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogParser.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogParser.java new file mode 100644 index 0000000000000..ac29234a771e9 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogParser.java @@ -0,0 +1,309 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Class to help parse AWS S3 Logs. + * see https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html + * + * Getting the regexp right is surprisingly hard; this class does it + * explicitly and names each group in the process. + * All group names are included in {@link #AWS_LOG_REGEXP_GROUPS} in the order + * within the log entries. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class S3LogParser { + + private S3LogParser() { + } + + /** + * Simple entry: anything up to a space. + * {@value}. + */ + private static final String SIMPLE = "[^ ]*"; + + /** + * Date/Time. Everything within square braces. + * {@value}. + */ + private static final String DATETIME = "\\[(.*?)\\]"; + + /** + * A natural number or "-". + * {@value}. 
+ */ + private static final String NUMBER = "(-|[0-9]*)"; + + /** + * A Quoted field or "-". + * {@value}. + */ + private static final String QUOTED = "(-|\"[^\"]*\")"; + + + /** + * An entry in the regexp. + * @param name name of the group + * @param pattern pattern to use in the regexp + * @return the pattern for the regexp + */ + private static String e(String name, String pattern) { + return String.format("(?<%s>%s) ", name, pattern); + } + + /** + * An entry in the regexp. + * @param name name of the group + * @param pattern pattern to use in the regexp + * @return the pattern for the regexp + */ + private static String eNoTrailing(String name, String pattern) { + return String.format("(?<%s>%s)", name, pattern); + } + + /** + * Simple entry using the {@link #SIMPLE} pattern. + * @param name name of the element (for code clarity only) + * @return the pattern for the regexp + */ + private static String e(String name) { + return e(name, SIMPLE); + } + + /** + * Quoted entry using the {@link #QUOTED} pattern. + * @param name name of the element (for code clarity only) + * @return the pattern for the regexp + */ + private static String q(String name) { + return e(name, QUOTED); + } + + /** + * Log group {@value}. + */ + public static final String OWNER_GROUP = "owner"; + + /** + * Log group {@value}. + */ + public static final String BUCKET_GROUP = "bucket"; + + /** + * Log group {@value}. + */ + public static final String TIMESTAMP_GROUP = "timestamp"; + + /** + * Log group {@value}. + */ + public static final String REMOTEIP_GROUP = "remoteip"; + + /** + * Log group {@value}. + */ + public static final String REQUESTER_GROUP = "requester"; + + /** + * Log group {@value}. + */ + public static final String REQUESTID_GROUP = "requestid"; + + /** + * Log group {@value}. + */ + public static final String VERB_GROUP = "verb"; + + /** + * Log group {@value}. + */ + public static final String KEY_GROUP = "key"; + + /** + * Log group {@value}. + */ + public static final String REQUESTURI_GROUP = "requesturi"; + + /** + * Log group {@value}. + */ + public static final String HTTP_GROUP = "http"; + + /** + * Log group {@value}. + */ + public static final String AWSERRORCODE_GROUP = "awserrorcode"; + + /** + * Log group {@value}. + */ + public static final String BYTESSENT_GROUP = "bytessent"; + + /** + * Log group {@value}. + */ + public static final String OBJECTSIZE_GROUP = "objectsize"; + + /** + * Log group {@value}. + */ + public static final String TOTALTIME_GROUP = "totaltime"; + + /** + * Log group {@value}. + */ + public static final String TURNAROUNDTIME_GROUP = "turnaroundtime"; + + /** + * Log group {@value}. + */ + public static final String REFERRER_GROUP = "referrer"; + + /** + * Log group {@value}. + */ + public static final String USERAGENT_GROUP = "useragent"; + + /** + * Log group {@value}. + */ + public static final String VERSION_GROUP = "version"; + + /** + * Log group {@value}. + */ + public static final String HOSTID_GROUP = "hostid"; + + /** + * Log group {@value}. + */ + public static final String SIGV_GROUP = "sigv"; + + /** + * Log group {@value}. + */ + public static final String CYPHER_GROUP = "cypher"; + + /** + * Log group {@value}. + */ + public static final String AUTH_GROUP = "auth"; + + /** + * Log group {@value}. + */ + public static final String ENDPOINT_GROUP = "endpoint"; + + /** + * Log group {@value}. 
+ */ + public static final String TLS_GROUP = "tls"; + + /** + * This is where anything at the tail of a log + * entry ends up; it is null unless/until the AWS + * logs are enhanced in future. + * Value {@value}. + */ + public static final String TAIL_GROUP = "tail"; + + /** + * Construct the log entry pattern. + */ + public static final String LOG_ENTRY_REGEXP = "" + + e(OWNER_GROUP) + + e(BUCKET_GROUP) + + e(TIMESTAMP_GROUP, DATETIME) + + e(REMOTEIP_GROUP) + + e(REQUESTER_GROUP) + + e(REQUESTID_GROUP) + + e(VERB_GROUP) + + e(KEY_GROUP) + + q(REQUESTURI_GROUP) + + e(HTTP_GROUP, NUMBER) + + e(AWSERRORCODE_GROUP) + + e(BYTESSENT_GROUP) + + e(OBJECTSIZE_GROUP) + + e(TOTALTIME_GROUP) + + e(TURNAROUNDTIME_GROUP) + + q(REFERRER_GROUP) + + q(USERAGENT_GROUP) + + e(VERSION_GROUP) + + e(HOSTID_GROUP) + + e(SIGV_GROUP) + + e(CYPHER_GROUP) + + e(AUTH_GROUP) + + e(ENDPOINT_GROUP) + + eNoTrailing(TLS_GROUP, SIMPLE) + + eNoTrailing(TAIL_GROUP, ".*") // anything which follows + + "$"; // end of line + + /** + * Groups in order. + */ + private static final String[] GROUPS = { + OWNER_GROUP, + BUCKET_GROUP, + TIMESTAMP_GROUP, + REMOTEIP_GROUP, + REQUESTER_GROUP, + REQUESTID_GROUP, + VERB_GROUP, + KEY_GROUP, + REQUESTURI_GROUP, + HTTP_GROUP, + AWSERRORCODE_GROUP, + BYTESSENT_GROUP, + OBJECTSIZE_GROUP, + TOTALTIME_GROUP, + TURNAROUNDTIME_GROUP, + REFERRER_GROUP, + USERAGENT_GROUP, + VERSION_GROUP, + HOSTID_GROUP, + SIGV_GROUP, + CYPHER_GROUP, + AUTH_GROUP, + ENDPOINT_GROUP, + TLS_GROUP, + TAIL_GROUP + }; + + /** + * Ordered list of regular expression group names. + */ + public static final List AWS_LOG_REGEXP_GROUPS = + Collections.unmodifiableList(Arrays.asList(GROUPS)); + + /** + * And the actual compiled pattern. + */ + public static final Pattern LOG_ENTRY_PATTERN = Pattern.compile( + LOG_ENTRY_REGEXP); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogVerbs.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogVerbs.java new file mode 100644 index 0000000000000..0bafdf2f66766 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/S3LogVerbs.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +/** + * Various verbs in the S3 logs. 
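+ * A minimal matching sketch against the pattern defined in
+ * {@code S3LogParser} (variable names here are illustrative only):
+ * <pre>
+ *   Matcher m = S3LogParser.LOG_ENTRY_PATTERN.matcher(logLine);
+ *   if (m.matches()) {
+ *     boolean isObjectGet = S3LogVerbs.GET.equals(
+ *         m.group(S3LogParser.VERB_GROUP));
+ *   }
+ * </pre>
+ * For the meaning of the individual operations, see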
+ * https://stackoverflow.com/questions/42707878/amazon-s3-logs-operation-definition + */ +public final class S3LogVerbs { + + private S3LogVerbs() { + } + + public static final String DELETE = "REST.DELETE.OBJECT"; + public static final String COPY = "REST.COPY.OBJECT"; + public static final String DELETE_BULK = "REST.POST.MULTI_OBJECT_DELETE"; + public static final String DELETE_BULK_ENTRY = "BATCH.DELETE.OBJECT"; + public static final String GET = "REST.GET.OBJECT"; + public static final String HEAD = "REST.HEAD.OBJECT"; + public static final String GET_ACL = "REST.GET.ACL"; + public static final String GET_LOGGING_STATUS = "REST.GET.LOGGING_STATUS"; + public static final String LIST = "REST.GET.BUCKET"; + public static final String MULTIPART_UPLOAD_START = "REST.POST.UPLOADS"; + public static final String MULTIPART_UPLOAD_PART = "REST.PUT.PART"; + public static final String MULTIPART_UPLOAD_COMPLETE = "REST.POST.UPLOAD"; + public static final String MULTIPART_UPLOADS_LIST = "REST.GET.UPLOADS"; + public static final String MULTIPART_UPLOAD_ABORT = "REST.DELETE.UPLOAD"; + public static final String PUT = "REST.PUT.OBJECT"; + public static final String REST_GET_POLICY_STATUS = "REST.GET.POLICY_STATUS"; + public static final String REST_GET_PUBLIC_ACCESS_BLOCK = + "REST.GET.PUBLIC_ACCESS_BLOCK"; + public static final String REST_GET_TAGGING = "REST.GET.TAGGING"; + public static final String S3_EXPIRE_OBJECT = "S3.EXPIRE.OBJECT"; + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractAuditSpanImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractAuditSpanImpl.java new file mode 100644 index 0000000000000..510fbab14f958 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractAuditSpanImpl.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.util.Time; + +import static java.util.Objects.requireNonNull; + +/** + * Base class for the audit spans implementations.. + */ +public abstract class AbstractAuditSpanImpl implements AuditSpanS3A { + + /** + * Span ID. + */ + private final String spanId; + + /** + * Timestamp in UTC of span creation. + */ + private final long timestamp; + + private final String operationName; + + /** + * Constructor. + * @param spanId span ID. + * @param operationName operation name + */ + protected AbstractAuditSpanImpl( + final String spanId, + final String operationName) { + this(spanId, Time.now(), operationName); + } + + /** + * Constructor. + * @param spanId span ID. 
+ * @param timestamp timestamp in millis + * @param operationName operation name + */ + protected AbstractAuditSpanImpl( + final String spanId, + final long timestamp, + final String operationName) { + this.spanId = requireNonNull(spanId); + this.timestamp = timestamp; + this.operationName = operationName; + } + + @Override + public final String getSpanId() { + return spanId; + } + + @Override + public String getOperationName() { + return operationName; + } + + @Override + public final long getTimestamp() { + return timestamp; + } + + @Override + public AuditSpanS3A activate() { + return this; + } + + /** + * Invoke {@link AuditSpan#deactivate()}. + * This is final: subclasses MUST override the + * {@code deactivate()} method. + */ + @Override + public final void close() { + deactivate(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractOperationAuditor.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractOperationAuditor.java new file mode 100644 index 0000000000000..97ee92a20b1e3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/AbstractOperationAuditor.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.s3a.audit.OperationAuditor; +import org.apache.hadoop.fs.s3a.audit.OperationAuditorOptions; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.service.AbstractService; + +/** + * This is a long-lived service which is created in S3A FS initialize + * (make it fast!) which provides context for tracking operations made to S3. + * An IOStatisticsStore is passed in -in production this is expected to + * be the S3AFileSystem instrumentation, which will have the + * {@code AUDIT_SPAN_START} statistic configured for counting durations. + */ +public abstract class AbstractOperationAuditor extends AbstractService + implements OperationAuditor { + + /** + * Base of IDs is a UUID. + */ + public static final String BASE = UUID.randomUUID().toString(); + + /** + * Counter to create unique auditor IDs. + */ + private static final AtomicLong SPAN_ID_COUNTER = new AtomicLong(1); + + /** + * Destination for recording statistics, especially duration/count of + * operations. + * Set in {@link #init(OperationAuditorOptions)}. + */ + private IOStatisticsStore iostatistics; + + /** + * Options: set in {@link #init(OperationAuditorOptions)}. + */ + private OperationAuditorOptions options; + + /** + * Auditor ID as a UUID. 
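+   * Span IDs are derived from this value in {@link #createSpanID()},
+   * so they take the form (example value illustrative)
+   * {@code <auditor-uuid>-00000002}.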
+ */ + private final UUID auditorUUID = UUID.randomUUID(); + + /** + * ID of the auditor, which becomes that of the filesystem + * in request contexts. + */ + private final String auditorID = auditorUUID.toString(); + + /** + * Construct. + * @param name name + * + */ + protected AbstractOperationAuditor(final String name) { + super(name); + } + + /** + * Sets the IOStats and then calls init(). + * @param opts options to initialize with. + */ + @Override + public void init(final OperationAuditorOptions opts) { + this.options = opts; + this.iostatistics = opts.getIoStatisticsStore(); + init(opts.getConfiguration()); + } + + @Override + public String getAuditorId() { + return auditorID; + } + + /** + * Get the IOStatistics Store. + * @return the IOStatistics store updated with statistics. + */ + public IOStatisticsStore getIOStatistics() { + return iostatistics; + } + + /** + * Get the options this auditor was initialized with. + * @return options. + */ + protected OperationAuditorOptions getOptions() { + return options; + } + + /** + * Create a span ID. + * @return a unique span ID. + */ + protected final String createSpanID() { + return String.format("%s-%08d", + auditorID, SPAN_ID_COUNTER.incrementAndGet()); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/ActiveAuditManagerS3A.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/ActiveAuditManagerS3A.java new file mode 100644 index 0000000000000..e7e775dafc0d0 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/ActiveAuditManagerS3A.java @@ -0,0 +1,773 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.util.ArrayList; +import java.util.List; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.HandlerContextAware; +import com.amazonaws.Request; +import com.amazonaws.Response; +import com.amazonaws.SdkBaseException; +import com.amazonaws.handlers.HandlerAfterAttemptContext; +import com.amazonaws.handlers.HandlerBeforeAttemptContext; +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.http.HttpResponse; +import com.amazonaws.services.s3.transfer.Transfer; +import com.amazonaws.services.s3.transfer.internal.TransferStateChangeListener; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.audit.AWSAuditEventCallbacks; +import org.apache.hadoop.fs.s3a.audit.AWSRequestAnalyzer; +import org.apache.hadoop.fs.s3a.audit.AuditFailureException; +import org.apache.hadoop.fs.s3a.audit.AuditIntegration; +import org.apache.hadoop.fs.s3a.audit.AuditManagerS3A; +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.s3a.audit.OperationAuditor; +import org.apache.hadoop.fs.s3a.audit.OperationAuditorOptions; +import org.apache.hadoop.fs.s3a.audit.S3AAuditConstants; +import org.apache.hadoop.fs.store.LogExactlyOnce; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.functional.FutureIO; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_FAILURE; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_REQUEST_EXECUTION; +import static org.apache.hadoop.fs.s3a.audit.AuditIntegration.attachSpanToRequest; +import static org.apache.hadoop.fs.s3a.audit.AuditIntegration.retrieveAttachedSpan; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_REQUEST_HANDLERS; + +/** + * Thread management for the active audit. + * This should be created by whatever wants to have active + * audit span tracking. + * + * It creates and starts the actual + * {@link OperationAuditor} for auditing. + * It then stores the thread-local span and returns a wrapping + * span. + * + * When the wrapper is closed/deactivated it + * will deactivate the wrapped span and then + * switch the active span to the unbounded span. + * + * The inner class {@link AWSAuditEventCallbacks} is returned + * as a request handler in {@link #createRequestHandlers()}; + * this forwards all requests to the outer {@code ActiveAuditManagerS3A}, + * which then locates the active span and forwards the request. + * If any such invocation raises an {@link AuditFailureException} + * then the IOStatistics counter for {@code AUDIT_FAILURE} + * is incremented. + * + */ +@InterfaceAudience.Private +public final class ActiveAuditManagerS3A + extends CompositeService + implements AuditManagerS3A { + + /** + * Logging. + */ + private static final Logger LOG = + LoggerFactory.getLogger(ActiveAuditManagerS3A.class); + + /** + * One of logger for warnings about span retrieval. 
+ */ + public static final LogExactlyOnce WARN_OF_SPAN_TYPE = + new LogExactlyOnce(LOG); + + public static final String AUDIT_MANAGER_OPERATION = "AuditManagerS3A"; + + public static final String NOT_A_WRAPPED_SPAN + = "Span attached to request is not a wrapped span"; + + /** + * Audit service. + */ + private OperationAuditor auditor; + + /** + * Some basic analysis for the logs. + */ + private final AWSRequestAnalyzer analyzer = new AWSRequestAnalyzer(); + + /** + * This is the span returned to after a wrapper is closed or + * the span is reset to the unbonded span.. + */ + private WrappingAuditSpan unbondedSpan; + + /** + * Thread local span. This defaults to being + * the unbonded span. + */ + private final ThreadLocal activeSpan = + ThreadLocal.withInitial(() -> getUnbondedSpan()); + + /** + * Destination for recording statistics, especially duration/count of + * operations. + */ + private final IOStatisticsStore ioStatisticsStore; + + /** + * Instantiate. + * @param iostatistics statistics target + */ + public ActiveAuditManagerS3A(final IOStatisticsStore iostatistics) { + super("ActiveAuditManagerS3A"); + this.ioStatisticsStore = iostatistics; + } + + @Override + protected void serviceInit(final Configuration conf) throws Exception { + super.serviceInit(conf); + // create and register the service so it follows the same lifecycle + OperationAuditorOptions options = + OperationAuditorOptions.builder() + .withConfiguration(conf) + .withIoStatisticsStore(ioStatisticsStore); + auditor = AuditIntegration.createAndInitAuditor( + getConfig(), + S3AAuditConstants.AUDIT_SERVICE_CLASSNAME, + options); + addService(auditor); + LOG.debug("Audit manager initialized with audit service {}", auditor); + } + + /** + * After starting the auditor, it is queried for its + * unbonded span, which is then wrapped and stored for + * use. + */ + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + setUnbondedSpan(new WrappingAuditSpan( + auditor.getUnbondedSpan(), false)); + LOG.debug("Started audit service {}", auditor); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(super.toString()); + sb.append(", auditor=").append(auditor); + sb.append('}'); + return sb.toString(); + } + + @Override + public OperationAuditor getAuditor() { + return auditor; + } + + /** + * Get the unbounded span. Until this manager + * is fully initialized it will return the no-op + * span. + * @return the unbounded span. + */ + private WrappingAuditSpan getUnbondedSpan() { + return unbondedSpan; + } + + /** + * Set the unbonded span. + * @param unbondedSpan the new unbonded span + */ + private void setUnbondedSpan(final WrappingAuditSpan unbondedSpan) { + this.unbondedSpan = unbondedSpan; + } + + /** + * Return the active wrapped span. + * @return a span. + */ + @Override + public AuditSpanS3A getActiveAuditSpan() { + return activeSpan(); + } + + /** + * Get the active span. + * This is the wrapped span, not the inner one, and it is + * of that type. + * @return the active WrappingAuditSpan + */ + private WrappingAuditSpan activeSpan() { + return activeSpan.get(); + } + + /** + * Set a specific span as the active span. + * This will wrap it. + * @param span span to use. + * @return the wrapped span. + */ + private AuditSpanS3A setActiveThreadSpan(AuditSpanS3A span) { + return switchToActiveSpan( + new WrappingAuditSpan(span, span.isValidSpan())); + } + + /** + * Switch to a given span. If it is null, use the + * unbounded span. 
+ * @param span to switch to; may be null + * @return the span switched to + */ + private WrappingAuditSpan switchToActiveSpan(WrappingAuditSpan span) { + if (span != null && span.isValidSpan()) { + activeSpan.set(span); + } else { + activeSpan.set(unbondedSpan); + } + return activeSpan(); + } + + /** + * The Span ID in the audit manager is the ID of the auditor, + * which can be used in the filesystem toString() method + * to assist in correlating client logs with S3 logs. + * It is returned here as part of the implementation of + * {@link AWSAuditEventCallbacks}. + * @return the unique ID of the FS. + */ + @Override + public String getSpanId() { + return auditor != null + ? auditor.getAuditorId() + : "(auditor not yet created)"; + } + + @Override + public String getOperationName() { + return AUDIT_MANAGER_OPERATION; + } + + /** + * Start an operation; as well as invoking the audit + * service to do this, sets the operation as the + * active operation for this thread. + * @param operation operation name. + * @param path1 first path of operation + * @param path2 second path of operation + * @return a wrapped audit span + * @throws IOException failure + */ + @Override + public AuditSpanS3A createSpan(final String operation, + @Nullable final String path1, + @Nullable final String path2) throws IOException { + // must be started + Preconditions.checkState(isInState(STATE.STARTED), + "Audit Manager %s is in wrong state: %s", + this, getServiceState()); + ioStatisticsStore.incrementCounter( + Statistic.AUDIT_SPAN_CREATION.getSymbol()); + return setActiveThreadSpan(auditor.createSpan( + operation, path1, path2)); + } + + /** + * Return a request handler for the AWS SDK which + * relays to this class. + * @return a request handler. + */ + @Override + public List createRequestHandlers() + throws IOException { + + // wire up the AWS SDK To call back into this class when + // preparing to make S3 calls. + List requestHandlers = new ArrayList<>(); + requestHandlers.add(new SdkRequestHandler()); + // now look for any more handlers + final Class[] handlers = getConfig().getClasses(AUDIT_REQUEST_HANDLERS); + if (handlers != null) { + for (Class handler : handlers) { + try { + Constructor ctor = handler.getConstructor(); + requestHandlers.add((RequestHandler2)ctor.newInstance()); + } catch (ExceptionInInitializerError e) { + throw FutureIO.unwrapInnerException(e); + } catch (Exception e) { + throw new IOException(e); + } + } + } + return requestHandlers; + } + + @Override + public TransferStateChangeListener createStateChangeListener() { + final WrappingAuditSpan span = activeSpan(); + return new TransferStateChangeListener() { + @Override + public void transferStateChanged(final Transfer transfer, + final Transfer.TransferState state) { + switchToActiveSpan(span); + } + }; + } + + @Override + public boolean checkAccess(final Path path, + final S3AFileStatus status, + final FsAction mode) + throws IOException { + return auditor.checkAccess(path, status, mode); + } + + /** + * Attach a reference to the active thread span, then + * invoke the same callback on that active thread. 
+ */ + @Override + public T requestCreated( + final T request) { + AuditSpanS3A span = getActiveAuditSpan(); + if (LOG.isTraceEnabled()) { + LOG.trace("Created Request {} in span {}", + analyzer.analyze(request), span); + } + attachSpanToRequest(request, span); + try { + return span.requestCreated(request); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + /** + * Forward to the active span. + * All invocations increment the statistics counter for + * {@link Statistic#AUDIT_REQUEST_EXECUTION}; + * failures will also increment + * {@link Statistic#AUDIT_FAILURE}; + * {@inheritDoc} + */ + @Override + public T beforeExecution( + final T request) { + ioStatisticsStore.incrementCounter(AUDIT_REQUEST_EXECUTION.getSymbol()); + + // identify the span and invoke the callback + try { + return extractAndActivateSpanFromRequest(request) + .beforeExecution(request); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + /** + * Forward to active span. + * @param request request + * @param response response. + */ + @Override + public void afterResponse(final Request request, + final Response response) + throws AuditFailureException, SdkBaseException { + try { + extractAndActivateSpanFromRequest(request) + .afterResponse(request, response); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + /** + * Get the active span from the handler context, + * falling back to the active thread span if there + * is nothing in the context. + * Provided the span is a wrapped span, the + * @param request request + * @param type of request. + * @return the callbacks + */ + private AWSAuditEventCallbacks + extractAndActivateSpanFromRequest(final T request) { + AWSAuditEventCallbacks span; + span = retrieveAttachedSpan(request); + if (span == null) { + // no span is attached. Not unusual for the copy operations, + // or for calls to GetBucketLocation made by the AWS client + LOG.debug("No audit span attached to request {}", + request); + // fall back to the active thread span. + // this will be the unbonded span if the thread is unbonded. + span = getActiveAuditSpan(); + } else { + if (span instanceof WrappingAuditSpan) { + switchToActiveSpan((WrappingAuditSpan) span); + } else { + // warn/log and continue without switching. + WARN_OF_SPAN_TYPE.warn(NOT_A_WRAPPED_SPAN + ": {}", span); + LOG.debug(NOT_A_WRAPPED_SPAN + ": {}", span); + } + } + return span; + } + + /** + * Forward to active span. + * @param request request + * @param response response. + * @param exception exception raised. 
+ */ + @Override + public void afterError(final Request request, + final Response response, + final Exception exception) + throws AuditFailureException, SdkBaseException { + try { + extractAndActivateSpanFromRequest(request) + .afterError(request, response, exception); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + @Override + public AmazonWebServiceRequest beforeMarshalling( + final AmazonWebServiceRequest request) { + try { + return extractAndActivateSpanFromRequest(request) + .beforeMarshalling(request); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + @Override + public void beforeRequest(final Request request) { + try { + extractAndActivateSpanFromRequest(request) + .beforeRequest(request); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + @Override + public void beforeAttempt(final HandlerBeforeAttemptContext context) { + try { + extractAndActivateSpanFromRequest(context.getRequest()) + .beforeAttempt(context); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + @Override + public void afterAttempt(final HandlerAfterAttemptContext context) { + try { + extractAndActivateSpanFromRequest(context.getRequest()) + .afterAttempt(context); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + } + + @Override + public HttpResponse beforeUnmarshalling(final Request request, + final HttpResponse httpResponse) { + try { + extractAndActivateSpanFromRequest(request.getOriginalRequest()) + .beforeUnmarshalling(request, httpResponse); + } catch (AuditFailureException e) { + ioStatisticsStore.incrementCounter(AUDIT_FAILURE.getSymbol()); + throw e; + } + return httpResponse; + } + + /** + * Callbacks from the AWS SDK; all forward to the ActiveAuditManagerS3A. + * We need a separate class because the SDK requires the handler list + * to be list of {@code RequestHandler2} instances. 
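+   * A sketch of how the handlers returned by createRequestHandlers()
+   * are expected to be attached to the SDK v1 client builder; the
+   * {@code withRequestHandlers} call and variable names are assumptions
+   * of this comment, not part of this class:
+   * <pre>
+   *   RequestHandler2[] handlers = auditManager.createRequestHandlers()
+   *       .toArray(new RequestHandler2[0]);
+   *   s3ClientBuilder.withRequestHandlers(handlers);
+   * </pre>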
+ */ + private class SdkRequestHandler extends RequestHandler2 { + + @Override + public AmazonWebServiceRequest beforeExecution( + final AmazonWebServiceRequest request) { + return ActiveAuditManagerS3A.this.beforeExecution(request); + } + + @Override + public void afterResponse(final Request request, + final Response response) { + ActiveAuditManagerS3A.this.afterResponse(request, response); + } + + @Override + public void afterError(final Request request, + final Response response, + final Exception e) { + ActiveAuditManagerS3A.this.afterError(request, response, e); + } + + @Override + public AmazonWebServiceRequest beforeMarshalling( + final AmazonWebServiceRequest request) { + return ActiveAuditManagerS3A.this.beforeMarshalling(request); + } + + @Override + public void beforeRequest(final Request request) { + ActiveAuditManagerS3A.this.beforeRequest(request); + } + + @Override + public void beforeAttempt( + final HandlerBeforeAttemptContext context) { + ActiveAuditManagerS3A.this.beforeAttempt(context); + } + + @Override + public HttpResponse beforeUnmarshalling( + final Request request, + final HttpResponse httpResponse) { + return ActiveAuditManagerS3A.this.beforeUnmarshalling(request, + httpResponse); + } + + @Override + public void afterAttempt( + final HandlerAfterAttemptContext context) { + ActiveAuditManagerS3A.this.afterAttempt(context); + } + } + + /** + * Wraps the plugged in spans with management of the active thread + * span, including switching to the unbounded span when a valid + * span is deactivated. + * Package-private for testing. + */ + private final class WrappingAuditSpan extends AbstractAuditSpanImpl { + + /** + * Inner span. + */ + private final AuditSpanS3A span; + + /** + * Is this span considered valid? + */ + private final boolean isValid; + + /** + * Create, wrapped. + * The spanID, name, timestamp etc copied from the span being wrapped. + * Why not the isValid state? We want to set our unbonded span without + * relying on the auditor doing the right thing. + * @param span inner span. + * @param isValid is the span valid + */ + private WrappingAuditSpan( + final AuditSpanS3A span, final boolean isValid) { + super(span.getSpanId(), span.getTimestamp(), span.getOperationName()); + this.span = requireNonNull(span); + this.isValid = isValid; + } + + /** + * Is the span active? + * @return true if this span is the active one for the current thread. + */ + private boolean isActive() { + return this == getActiveAuditSpan(); + } + + /** + * Makes this the thread's active span and activate. + * If the span was already active: no-op. + */ + @Override + public AuditSpanS3A activate() { + if (!isActive()) { + switchToActiveSpan(this); + span.activate(); + } + return this; + } + + /** + * Switch to the unbounded span and then deactivate this span. + * No-op for invalid spans, + * so as to prevent the unbounded span from being closed + * and everything getting very confused. + */ + @Override + public void deactivate() { + // no-op for invalid spans, + // so as to prevent the unbounded span from being closed + // and everything getting very confused. + if (!isValid || !isActive()) { + return; + } + // deactivate the span + span.deactivate(); + // and go to the unbounded one. + switchToActiveSpan(getUnbondedSpan()); + } + + /** + * Forward to the wrapped span. + * {@inheritDoc} + */ + @Override + public T requestCreated( + final T request) { + return span.requestCreated(request); + } + + /** + * This span is valid if the span isn't closed and the inner + * span is valid. 
+ * @return true if the span is considered valid. + */ + @Override + public boolean isValidSpan() { + return isValid && span.isValidSpan(); + } + + /** + * Forward to the inner span. + * {@inheritDoc} + */ + @Override + public void set(final String key, final String value) { + span.set(key, value); + } + + /** + * Forward to the inner span. + * @param request request + * @param type of request + * @return an updated request. + */ + @Override + public T beforeExecution( + final T request) { + return span.beforeExecution(request); + } + + /** + * Forward to the inner span. + * @param request request + * @param response response. + */ + @Override + public void afterResponse(final Request request, + final Response response) { + span.afterResponse(request, response); + } + + /** + * Forward to the inner span. + * @param request request + * @param response response. + * @param exception exception raised. + */ + @Override + public void afterError(final Request request, + final Response response, + final Exception exception) { + span.afterError(request, response, exception); + } + + /** + * Forward to the inner span. + * @param request request + * @return request to marshall + */ + @Override + public AmazonWebServiceRequest beforeMarshalling( + final AmazonWebServiceRequest request) { + return span.beforeMarshalling(request); + } + + /** + * Forward to the inner span. + * @param request request + */ + @Override + public void beforeRequest(final Request request) { + span.beforeRequest(request); + } + + /** + * Forward to the inner span. + * @param context full context, including the request. + */ + @Override + public void beforeAttempt( + final HandlerBeforeAttemptContext context) { + span.beforeAttempt(context); + } + + /** + * Forward to the inner span. + * + * @param context full context, including the request. + */ + @Override + public void afterAttempt( + final HandlerAfterAttemptContext context) { + span.afterAttempt(context); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "WrappingAuditSpan{"); + sb.append("span=").append(span); + sb.append(", valid=").append(isValidSpan()); + sb.append('}'); + return sb.toString(); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/LoggingAuditor.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/LoggingAuditor.java new file mode 100644 index 0000000000000..1722df178edc1 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/LoggingAuditor.java @@ -0,0 +1,435 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import com.amazonaws.AmazonWebServiceRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.audit.AuditConstants; +import org.apache.hadoop.fs.audit.CommonAuditContext; +import org.apache.hadoop.fs.s3a.audit.AWSRequestAnalyzer; +import org.apache.hadoop.fs.s3a.audit.AuditFailureException; +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.store.audit.HttpReferrerAuditHeader; +import org.apache.hadoop.security.UserGroupInformation; + +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_FILESYSTEM_ID; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PRINCIPAL; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_THREAD0; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_TIMESTAMP; +import static org.apache.hadoop.fs.audit.CommonAuditContext.currentAuditContext; +import static org.apache.hadoop.fs.audit.CommonAuditContext.currentThreadID; +import static org.apache.hadoop.fs.s3a.audit.AWSRequestAnalyzer.isRequestNotAlwaysInSpan; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.OUTSIDE_SPAN; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REFERRER_HEADER_ENABLED; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REFERRER_HEADER_ENABLED_DEFAULT; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REFERRER_HEADER_FILTER; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REJECT_OUT_OF_SPAN_OPERATIONS; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.UNAUDITED_OPERATION; +import static org.apache.hadoop.fs.s3a.commit.CommitUtils.extractJobID; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.HEADER_REFERRER; + +/** + * The LoggingAuditor logs operations at DEBUG (in SDK Request) and + * in span lifecycle and S3 request class construction at TRACE. + * The context information is added as the HTTP referrer. + */ +@InterfaceAudience.Private +public class LoggingAuditor + extends AbstractOperationAuditor { + + /** + * This is where the context gets logged to. + */ + private static final Logger LOG = + LoggerFactory.getLogger(LoggingAuditor.class); + + + /** + * Some basic analysis for the logs. + */ + private final AWSRequestAnalyzer analyzer = new AWSRequestAnalyzer(); + + /** + * Default span to use when there is no other. + */ + private AuditSpanS3A warningSpan; + + /** + * Should out of scope ops be rejected? + */ + private boolean rejectOutOfSpan; + + /** + * Map of attributes which will be added to all operations. + */ + private final Map attributes = new HashMap<>(); + + /** + * Should the referrer header be added? + */ + private boolean headerEnabled; + + /** + * This is the header sent by the last S3 operation through + * this auditor. + *
    + * It is for testing -allows for Integration tests to + * verify that a header was sent and query what was in it. + * Initially an empty string. + */ + private volatile String lastHeader = ""; + + /** + * Attributes to filter. + */ + private Collection filters; + + /** + * Create the auditor. + * The UGI current user is used to provide the principal; + * this will be cached and provided in the referrer header. + */ + public LoggingAuditor() { + + super("LoggingAuditor "); + attributes.put(PARAM_FILESYSTEM_ID, getAuditorId()); + + + // add the principal + try { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + addAttribute(PARAM_PRINCIPAL, ugi.getUserName()); + } catch (IOException ex) { + LOG.warn("Auditor unable to determine principal", ex); + } + } + + /** + * Service init, look for jobID and attach as an attribute in log entries. + * This is where the warning span is created, so the relevant attributes + * (and filtering options) are applied. + * @param conf configuration + * @throws Exception failure + */ + @Override + protected void serviceInit(final Configuration conf) throws Exception { + super.serviceInit(conf); + rejectOutOfSpan = conf.getBoolean( + REJECT_OUT_OF_SPAN_OPERATIONS, false); + // attach the job ID if there is one in the configuration used + // to create this file. + String jobID = extractJobID(conf); + if (jobID != null) { + addAttribute(AuditConstants.PARAM_JOB_ID, jobID); + } + headerEnabled = getConfig().getBoolean(REFERRER_HEADER_ENABLED, + REFERRER_HEADER_ENABLED_DEFAULT); + filters = conf.getTrimmedStringCollection(REFERRER_HEADER_FILTER); + final CommonAuditContext currentContext = currentAuditContext(); + warningSpan = new WarningSpan(OUTSIDE_SPAN, + currentContext, createSpanID(), null, null); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "LoggingAuditor{"); + sb.append("ID='").append(getAuditorId()).append('\''); + sb.append(", headerEnabled=").append(headerEnabled); + sb.append(", rejectOutOfSpan=").append(rejectOutOfSpan); + sb.append('}'); + return sb.toString(); + } + + @Override + public AuditSpanS3A createSpan(final String operation, + @Nullable final String path1, + @Nullable final String path2) { + LoggingAuditSpan span = new LoggingAuditSpan( + createSpanID(), + operation, + prepareActiveContext(), + path1, + path2); + span.start(); + return span; + } + + /** + * Get/Prepare the active context for a span. + * @return the common audit context. + */ + private CommonAuditContext prepareActiveContext() { + return currentAuditContext(); + } + + /** + * Add an attribute. + * @param key key + * @param value value + */ + public final void addAttribute(String key, String value) { + attributes.put(key, value); + } + + @Override + public AuditSpanS3A getUnbondedSpan() { + return warningSpan; + } + + /** + * Get the last header used. + * @return the last referrer header generated. + */ + public String getLastHeader() { + return lastHeader; + } + + /** + * Set that last header. + * @param lastHeader the value for the lastHeader field. + */ + private void setLastHeader(final String lastHeader) { + this.lastHeader = lastHeader; + } + + /** + * Span which logs at debug and sets the HTTP referrer on + * invocations. + * Note: checkstyle complains that this should be final because + * it is private. This is not true, as it is subclassed in + * the same file. 
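+   * The referrer value is a synthetic URL; roughly (host, span ID, path
+   * and principal below are illustrative of what
+   * {@code HttpReferrerAuditHeader} builds, not exact):
+   * {@code https://audit.example.org/hadoop/1/op_open/<auditor-id>-0001/?op=op_open&p1=s3a://bucket/file&pr=alice}.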
+ */ + private class LoggingAuditSpan extends AbstractAuditSpanImpl { + + private final HttpReferrerAuditHeader referrer; + + private final String description; + + private LoggingAuditSpan( + final String spanId, + final String operationName, + final CommonAuditContext context, + final String path1, + final String path2) { + super(spanId, operationName); + + this.referrer = HttpReferrerAuditHeader.builder() + .withContextId(getAuditorId()) + .withSpanId(spanId) + .withOperationName(operationName) + .withPath1(path1) + .withPath2(path2) + .withAttributes(attributes) + // thread at the time of creation. + .withAttribute(PARAM_THREAD0, + currentThreadID()) + .withAttribute(PARAM_TIMESTAMP, Long.toString(getTimestamp())) + .withEvaluated(context.getEvaluatedEntries()) + .withFilter(filters) + .build(); + + this.description = referrer.buildHttpReferrer(); + } + + public void start() { + LOG.trace("{} Start {}", getSpanId(), getDescription()); + } + + /** + * Get the span description built in the constructor. + * @return description text. + */ + protected String getDescription() { + return description; + } + + /** + * Activate: log at TRACE. + * @return this span. + */ + @Override + public AuditSpanS3A activate() { + LOG.trace("[{}] {} Activate {}", + currentThreadID(), getSpanId(), getDescription()); + return this; + } + + /** + * Log at TRACE. + */ + @Override + public void deactivate() { + LOG.trace("[{}] {} Deactivate {}", + currentThreadID(), getSpanId(), getDescription()); + } + + + /** + * Pass to the HTTP referrer. + * {@inheritDoc} + */ + @Override + public void set(final String key, final String value) { + referrer.set(key, value); + } + + /** + * Before execution, the logging auditor always builds + * the referrer header, saves to the outer class + * (where {@link #getLastHeader()} can retrieve it, + * and logs at debug. + * If configured to add the header to the S3 logs, it will + * be set as the HTTP referrer. + * @param request request + * @param type of request. + * @return the request with any extra headers. + */ + @Override + public T beforeExecution( + final T request) { + // build the referrer header + final String header = referrer.buildHttpReferrer(); + // update the outer class's field. + setLastHeader(header); + if (headerEnabled) { + // add the referrer header + request.putCustomRequestHeader(HEADER_REFERRER, + header); + } + if (LOG.isDebugEnabled()) { + LOG.debug("[{}] {} Executing {} with {}; {}", + currentThreadID(), + getSpanId(), + getOperationName(), + analyzer.analyze(request), + header); + } + return request; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "LoggingAuditSpan{"); + sb.append(", id='").append(getSpanId()).append('\''); + sb.append("description='").append(description).append('\''); + sb.append('}'); + return sb.toString(); + } + + /** + * Get the referrer; visible for tests. + * @return the referrer. + */ + HttpReferrerAuditHeader getReferrer() { + return referrer; + } + } + + /** + * Span which logs at WARN; used to highlight spans + * without a containing span. 
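+   * Whether such out-of-span requests are then rejected is controlled by
+   * {@code fs.s3a.audit.reject.out.of.span.operations}; for example, test
+   * runs may set it to {@code true} so unaudited requests fail fast, while
+   * the default here is to log and continue.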
+ */ + private final class WarningSpan extends LoggingAuditSpan { + + private WarningSpan( + final String name, + final CommonAuditContext context, + final String spanId, + final String path1, final String path2) { + super(spanId, name, context, path1, path2); + } + + @Override + public void start() { + LOG.warn("[{}] {} Start {}", + currentThreadID(), getSpanId(), getDescription()); + } + + @Override + public AuditSpanS3A activate() { + LOG.warn("[{}] {} Activate {}", + currentThreadID(), getSpanId(), getDescription()); + return this; + } + + @Override + public boolean isValidSpan() { + return false; + } + + @Override + public T requestCreated( + final T request) { + String error = "Creating a request outside an audit span " + + analyzer.analyze(request); + LOG.info(error); + if (LOG.isDebugEnabled()) { + LOG.debug(error, new AuditFailureException("unaudited")); + } + return request; + } + + /** + * Handle requests made without a real context by logging and + * increment the failure count. + * Some requests (e.g. copy part) are not expected in spans due + * to how they are executed; these do not trigger failures. + * @param request request + * @param type of request + * @return an updated request. + * @throws AuditFailureException if failure is enabled. + */ + @Override + public T beforeExecution( + final T request) { + + + String error = "executing a request outside an audit span " + + analyzer.analyze(request); + LOG.warn("{} {}", + getSpanId(), error); + final String unaudited = getSpanId() + " " + + UNAUDITED_OPERATION + " " + error; + if (isRequestNotAlwaysInSpan(request)) { + // can get by auditing during a copy, so don't overreact + LOG.debug(unaudited); + } else { + final RuntimeException ex = new AuditFailureException(unaudited); + LOG.debug(unaudited, ex); + if (rejectOutOfSpan) { + throw ex; + } + } + // now hand off to the superclass for its normal preparation + return super.beforeExecution(request); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditManagerS3A.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditManagerS3A.java new file mode 100644 index 0000000000000..2e51699da6f44 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditManagerS3A.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.services.s3.transfer.Transfer; +import com.amazonaws.services.s3.transfer.internal.TransferStateChangeListener; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.audit.AuditManagerS3A; +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.s3a.audit.OperationAuditor; +import org.apache.hadoop.fs.s3a.audit.OperationAuditorOptions; +import org.apache.hadoop.service.CompositeService; + +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * Simple No-op audit manager for use before a real + * audit chain is set up, and for testing. + * Audit spans always have a unique ID and the activation/deactivation + * operations on them will update this audit manager's active span. + * It does have the service lifecycle, so do + * create a unique instance whenever used. + */ +@InterfaceAudience.Private +public class NoopAuditManagerS3A extends CompositeService + implements AuditManagerS3A, NoopSpan.SpanActivationCallbacks { + + private static final NoopAuditor NOOP_AUDITOR = + NoopAuditor.createAndStartNoopAuditor(new Configuration(), null); + + /** + * The inner auditor. + */ + private NoopAuditor auditor = NOOP_AUDITOR; + + /** + * Thread local span. This defaults to being + * the unbonded span. + */ + private final ThreadLocal activeSpan = + ThreadLocal.withInitial(this::getUnbondedSpan); + + /** + * ID which is returned as a span ID in the audit event + * callbacks. + */ + private final String id; + + /** + * Constructor. + * Will create and start a new instance of the auditor. + */ + public NoopAuditManagerS3A() { + super("NoopAuditManagerS3A"); + id = UUID.randomUUID().toString(); + } + + @Override + protected void serviceInit(final Configuration conf) throws Exception { + super.serviceInit(conf); + NoopAuditor audit = new NoopAuditor(this); + final OperationAuditorOptions options = + OperationAuditorOptions.builder() + .withConfiguration(conf) + .withIoStatisticsStore(iostatisticsStore().build()); + addService(audit); + audit.init(options); + } + + @Override + public String getSpanId() { + return id; + } + + @Override + public String getOperationName() { + return getName(); + } + + @Override + public OperationAuditor getAuditor() { + return auditor; + } + + /** + * Unbonded span to use after deactivation. 
+ */ + private AuditSpanS3A getUnbondedSpan() { + return auditor.getUnbondedSpan(); + } + + @Override + public AuditSpanS3A getActiveAuditSpan() { + return NoopSpan.INSTANCE; + } + + @Override + public AuditSpanS3A createSpan(final String operation, + @Nullable final String path1, + @Nullable final String path2) throws IOException { + return createNewSpan(operation, path1, path2); + } + + @Override + public List createRequestHandlers() throws IOException { + return new ArrayList<>(); + } + + @Override + public TransferStateChangeListener createStateChangeListener() { + return new TransferStateChangeListener() { + public void transferStateChanged(final Transfer transfer, + final Transfer.TransferState state) { + } + }; + } + + /** + * Forward to the auditor. + * @param path path to check + * @param status status of the path. + * @param mode access mode. + * @throws IOException failure + */ + @Override + public boolean checkAccess(final Path path, + final S3AFileStatus status, + final FsAction mode) + throws IOException { + return auditor.checkAccess(path, status, mode); + } + + @Override + public void activate(final AuditSpanS3A span) { + activeSpan.set(span); + } + + @Override + public void deactivate(final AuditSpanS3A span) { + activate(getUnbondedSpan()); + } + + /** + * A static source of no-op spans, using the same span ID + * source as managed spans. + * @param name operation name. + * @param path1 first path of operation + * @param path2 second path of operation + * @return a span for the audit + */ + public static AuditSpanS3A createNewSpan( + final String name, + final String path1, + final String path2) { + return NOOP_AUDITOR.createSpan(name, path1, path2); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditor.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditor.java new file mode 100644 index 0000000000000..ca6a7dd840fa8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopAuditor.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import javax.annotation.Nullable; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.s3a.audit.OperationAuditorOptions; + +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * An audit service which returns the {@link NoopSpan}. + * Even though the spans are no-ops, each span is still + * created with a unique span ID. + */ +public class NoopAuditor extends AbstractOperationAuditor { + + /** + * unbonded span created in constructor. 
+ */ + private final AuditSpanS3A unbondedSpan; + + /** + * Activation callbacks. + */ + private final NoopSpan.SpanActivationCallbacks activationCallbacks; + + /** + * Constructor. + * This will be used when the auditor is created through + * configuration and classloading. + */ + public NoopAuditor() { + this(null); + } + + /** + * Constructor when explicitly created within + * the {@link NoopAuditManagerS3A}. + * @param activationCallbacks Activation callbacks. + */ + public NoopAuditor( + NoopSpan.SpanActivationCallbacks activationCallbacks) { + super("NoopAuditor"); + this.unbondedSpan = createSpan("unbonded", null, null); + this.activationCallbacks = activationCallbacks; + } + + @Override + public AuditSpanS3A createSpan( + final String operation, + @Nullable final String path1, + @Nullable final String path2) { + return new NoopSpan(createSpanID(), operation, path1, path2, + activationCallbacks); + } + + @Override + public AuditSpanS3A getUnbondedSpan() { + return unbondedSpan; + } + + /** + * Create, init and start an instance. + * @param conf configuration. + * @param activationCallbacks Activation callbacks. + * @return a started instance. + */ + public static NoopAuditor createAndStartNoopAuditor(Configuration conf, + NoopSpan.SpanActivationCallbacks activationCallbacks) { + NoopAuditor noop = new NoopAuditor(activationCallbacks); + final OperationAuditorOptions options = + OperationAuditorOptions.builder() + .withConfiguration(conf) + .withIoStatisticsStore(iostatisticsStore().build()); + noop.init(options); + noop.start(); + return noop; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopSpan.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopSpan.java new file mode 100644 index 0000000000000..0b311dcdf61ab --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/NoopSpan.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; + +/** + * A minimal span with no direct side effects. + * It does have an ID and, if given callbacks, + * will notify the callback implementation + * of activation and deactivation. + * Subclassable for tests. + */ +public class NoopSpan extends AbstractAuditSpanImpl { + + private final String path1; + + private final String path2; + + /** Activation callbacks. */ + private final SpanActivationCallbacks activationCallbacks; + + /** + * Static public instance. + */ + public static final NoopSpan INSTANCE = new NoopSpan(); + + /** + * Create a no-op span. 
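+   * The callbacks may be null, in which case {@link #activate()} and
+   * {@link #deactivate()} become pure no-ops.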
+ * @param spanId span ID + * @param operationName operation name + * @param path1 path + * @param path2 path 2 + * @param activationCallbacks Activation callbacks. + */ + protected NoopSpan(String spanId, + final String operationName, + final String path1, + final String path2, + final SpanActivationCallbacks activationCallbacks) { + super(spanId, operationName); + this.path1 = path1; + this.path2 = path2; + this.activationCallbacks = activationCallbacks; + } + + protected NoopSpan() { + this("", "no-op", null, null, null); + } + + + @Override + public AuditSpanS3A activate() { + if (activationCallbacks != null) { + activationCallbacks.activate(this); + } + return this; + } + + @Override + public void deactivate() { + if (activationCallbacks != null) { + activationCallbacks.deactivate(this); + } + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("NoopSpan{"); + sb.append("id='").append(getSpanId()).append('\''); + sb.append("name='").append(getOperationName()).append('\''); + sb.append(", path1='").append(path1).append('\''); + sb.append(", path2='").append(path2).append('\''); + sb.append('}'); + return sb.toString(); + } + + /** Activation callbacks. */ + public interface SpanActivationCallbacks { + + /** + * Span was activated. + * @param span span reference. + */ + void activate(AuditSpanS3A span); + + /** + * Span was deactivated. + * @param span span reference. + */ + void deactivate(AuditSpanS3A span); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/S3AInternalAuditConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/S3AInternalAuditConstants.java new file mode 100644 index 0000000000000..f82e3d7f1e5e8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/S3AInternalAuditConstants.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit.impl; + +import com.amazonaws.handlers.HandlerContextKey; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.s3a.audit.AWSAuditEventCallbacks; + +/** + * Internal constants; not intended for public use, or + * for use by any external implementations. + */ +@InterfaceAudience.Private +public final class S3AInternalAuditConstants { + + private S3AInternalAuditConstants() { + } + + /** + * Handler key for audit span callbacks. + * This is used to bind the handler in the AWS code. 
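+   * An illustrative sketch of binding and retrieval (not part of this
+   * patch; {@code request} is any {@code AmazonWebServiceRequest} and
+   * {@code callbacks} an {@code AWSAuditEventCallbacks} instance):
+   * <pre>
+   *   request.addHandlerContext(AUDIT_SPAN_HANDLER_CONTEXT, callbacks);
+   *   AWSAuditEventCallbacks cb =
+   *       request.getHandlerContext(AUDIT_SPAN_HANDLER_CONTEXT);
+   * </pre>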
+ */ + public static final HandlerContextKey + AUDIT_SPAN_HANDLER_CONTEXT = + new HandlerContextKey<>( + "org.apache.hadoop.fs.s3a.audit.AWSAuditEventCallbacks"); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/package-info.java new file mode 100644 index 0000000000000..d4553cc159fb2 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/package-info.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The internal implementations of auditing support. + * This package is not for use by extensions. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.audit.impl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/package-info.java new file mode 100644 index 0000000000000..5e4f8580e98c3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/package-info.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Support for auditing and ultimately tracing operations. + * This is a public API for extension points, e.g. opentracing. + * However, it is very unstable as we evolve how best to audit/trace + * operation. + * + * An audit service is instantiated when an S3A Filesystem is initialized + * during creation. + * The choice of service is determined in the configuration option + * {@link org.apache.hadoop.fs.s3a.audit.S3AAuditConstants#AUDIT_SERVICE_CLASSNAME}. 
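+ * For example, to select the no-op auditor added in this patch
+ * (a sketch only; the property name shown is assumed to be the value
+ * of that constant):
+ * <pre>
+ *   Configuration conf = new Configuration();
+ *   conf.set("fs.s3a.audit.service.classname",
+ *       "org.apache.hadoop.fs.s3a.audit.impl.NoopAuditor");
+ * </pre>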
+ * The service MUST implement the interface + * {@link org.apache.hadoop.fs.s3a.audit.OperationAuditor} + * to provide an {@link org.apache.hadoop.fs.store.audit.AuditSpan} whenever + * an operation is started through a public FileSystem API call + * (+some other operations). + */ + +@InterfaceAudience.LimitedPrivate("S3A auditing extensions") +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.audit; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java index f73689979ec25..1fd24969d723d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a.commit; +import javax.annotation.Nullable; import java.io.FileNotFoundException; import java.io.IOException; import java.text.DateFormat; @@ -30,6 +31,7 @@ import java.util.concurrent.TimeUnit; import com.amazonaws.services.s3.model.MultipartUpload; + import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -42,8 +44,11 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.audit.AuditConstants; +import org.apache.hadoop.fs.audit.CommonAuditContext; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; @@ -60,11 +65,13 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.functional.InvocationRaisingIOE; import static org.apache.hadoop.fs.s3a.Constants.THREAD_POOL_SHUTDOWN_DELAY_SECONDS; import static org.apache.hadoop.fs.s3a.Invoker.ignoreIOExceptions; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_COMMIT_JOB; +import static org.apache.hadoop.fs.audit.CommonAuditContext.currentAuditContext; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtilsWithMR.*; @@ -129,6 +136,8 @@ public abstract class AbstractS3ACommitter extends PathOutputCommitter */ private final JobUUIDSource uuidSource; + private final CommonAuditContext commonAuditContext; + /** * Has this instance been used for job setup? * If so then it is safe for a locally generated @@ -175,6 +184,11 @@ public abstract class AbstractS3ACommitter extends PathOutputCommitter private final CommitterStatistics committerStatistics; + /** + * Source of Audit spans. + */ + private final AuditSpanSource auditSpanSource; + /** * Create a committer. 
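   * Auditing note: the current thread's common audit context and the
   * destination filesystem's audit span source are captured here
   * (see the fields above).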
* This constructor binds the destination directory and configuration, but @@ -203,6 +217,13 @@ protected AbstractS3ACommitter( LOG.debug("{} instantiated for job \"{}\" ID {} with destination {}", role, jobName(context), jobIdString(context), outputPath); S3AFileSystem fs = getDestS3AFS(); + // set this thread's context with the job ID. + // audit spans created in this thread will pick + // up this value. + this.commonAuditContext = currentAuditContext(); + updateCommonContext(); + // the filesystem is the span source, always. + auditSpanSource = fs.getAuditSpanSource(); this.createJobMarker = context.getConfiguration().getBoolean( CREATE_SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, DEFAULT_CREATE_SUCCESSFUL_JOB_DIR_MARKER); @@ -535,6 +556,8 @@ public void setupJob(JobContext context) throws IOException { @Override public void setupTask(TaskAttemptContext context) throws IOException { TaskAttemptID attemptID = context.getTaskAttemptID(); + updateCommonContext(); + try (DurationInfo d = new DurationInfo(LOG, "Setup Task %s", attemptID)) { // reject attempts to set up the task where the output won't be @@ -947,11 +970,11 @@ public void cleanupJob(JobContext context) throws IOException { protected void maybeIgnore( boolean suppress, String action, - Invoker.VoidOperation operation) throws IOException { + InvocationRaisingIOE operation) throws IOException { if (suppress) { ignoreIOExceptions(LOG, action, "", operation); } else { - operation.execute(); + operation.apply(); } } @@ -1361,6 +1384,43 @@ public String toString() { } } + /** + * Add jobID to current context. + */ + protected final void updateCommonContext() { + currentAuditContext().put(AuditConstants.PARAM_JOB_ID, uuid); + } + + /** + * Remove JobID from the current thread's context. + */ + protected final void resetCommonContext() { + currentAuditContext().remove(AuditConstants.PARAM_JOB_ID); + } + + protected AuditSpanSource getAuditSpanSource() { + return auditSpanSource; + } + + /** + * Start an operation; retrieve an audit span. + * + * All operation names SHOULD come from + * {@code StoreStatisticNames} or + * {@code StreamStatisticNames}. + * @param name operation name. + * @param path1 first path of operation + * @param path2 second path of operation + * @return a span for the audit + * @throws IOException failure + */ + protected AuditSpan startOperation(String name, + @Nullable String path1, + @Nullable String path2) + throws IOException { + return getAuditSpanSource().createSpan(name, path1, path2); + } + /** * State of the active commit operation. 
* diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java index cd8257f97fb91..a4f13b78a3f0c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java @@ -48,10 +48,11 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AUtils; -import org.apache.hadoop.fs.s3a.WriteOperationHelper; +import org.apache.hadoop.fs.s3a.WriteOperations; import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; +import org.apache.hadoop.fs.s3a.impl.AbstractStoreOperation; import org.apache.hadoop.fs.s3a.impl.HeaderProcessing; import org.apache.hadoop.fs.s3a.impl.InternalConstants; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; @@ -65,11 +66,13 @@ import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.s3a.S3AUtils.*; +import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_COMMIT_JOB; import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_MATERIALIZE_FILE; import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_STAGE_FILE_UPLOAD; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration; +import static org.apache.hadoop.util.functional.RemoteIterators.cleanupRemoteIterator; /** * The implementation of the various actions a committer needs. @@ -81,7 +84,8 @@ * duplicate that work. * */ -public class CommitOperations implements IOStatisticsSource { +public class CommitOperations extends AbstractStoreOperation + implements IOStatisticsSource { private static final Logger LOG = LoggerFactory.getLogger( CommitOperations.class); @@ -96,7 +100,7 @@ public class CommitOperations implements IOStatisticsSource { /** * Write operations for the destination fs. */ - private final WriteOperationHelper writeOperations; + private final WriteOperations writeOperations; /** * Filter to find all {code .pendingset} files. @@ -113,21 +117,29 @@ public class CommitOperations implements IOStatisticsSource { /** * Instantiate. * @param fs FS to bind to + * @throws IOException failure to bind. */ - public CommitOperations(S3AFileSystem fs) { + public CommitOperations(S3AFileSystem fs) throws IOException { this(requireNonNull(fs), fs.newCommitterStatistics()); } /** - * Instantiate. + * Instantiate. This creates a new audit span for + * the commit operations. * @param fs FS to bind to * @param committerStatistics committer statistics + * @throws IOException failure to bind. 
*/ public CommitOperations(S3AFileSystem fs, - CommitterStatistics committerStatistics) { - this.fs = requireNonNull(fs); + CommitterStatistics committerStatistics) throws IOException { + super(requireNonNull(fs).createStoreContext()); + this.fs = fs; statistics = requireNonNull(committerStatistics); - writeOperations = fs.getWriteOperationHelper(); + // create a span + writeOperations = fs.createWriteOperationHelper( + fs.getAuditSpanSource().createSpan( + COMMITTER_COMMIT_JOB.getSymbol(), + "/", null)); } /** @@ -365,6 +377,7 @@ public MaybeIOE abortAllSinglePendingCommits(Path pendingDir, } } } + cleanupRemoteIterator(pendingFiles); return outcome; } @@ -388,7 +401,7 @@ protected RemoteIterator ls(Path path, boolean recursive) */ public List listPendingUploadsUnderPath(Path dest) throws IOException { - return fs.listMultipartUploads(fs.pathToKey(dest)); + return writeOperations.listMultipartUploads(fs.pathToKey(dest)); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtils.java index 9c684c77e1b25..6ea65b9132f8f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtils.java @@ -125,5 +125,29 @@ public static void validateCollectionClass(Iterable it, Class classname) } } + /** + * Extract the job ID from a configuration. + * @param conf configuration + * @return a job ID or null. + */ + public static String extractJobID(Configuration conf) { + + String jobUUID = conf.getTrimmed(FS_S3A_COMMITTER_UUID, ""); + + if (!jobUUID.isEmpty()) { + return jobUUID; + } + // there is no job UUID. + // look for one from spark + jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, ""); + if (!jobUUID.isEmpty()) { + return jobUUID; + } + jobUUID = conf.getTrimmed(MR_JOB_ID, ""); + if (!jobUUID.isEmpty()) { + return jobUUID; + } + return null; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java index 461c9a5e646c3..fcafdd1ed1280 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java @@ -124,4 +124,10 @@ private InternalCommitterConstants() { "Job/task context does not contain a unique ID in " + SPARK_WRITE_UUID; + /** + * The MR job ID; copies from MRJobConfig so that it can be + * referred to without needing hadoop-mapreduce on the classpath. 
+ */ + public static final String MR_JOB_ID = "mapreduce.job.id"; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java index 41f95c8620692..41d36b2a8d7a0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.commit.magic.MagicCommitTracker; -import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.impl.AbstractStoreOperation; import static org.apache.hadoop.fs.s3a.commit.MagicCommitPaths.*; @@ -45,14 +45,12 @@ *

 * <b>Important</b>
    : must not directly or indirectly import a class which * uses any datatype in hadoop-mapreduce. */ -public class MagicCommitIntegration { +public class MagicCommitIntegration extends AbstractStoreOperation { private static final Logger LOG = LoggerFactory.getLogger(MagicCommitIntegration.class); private final S3AFileSystem owner; private final boolean magicCommitEnabled; - private final StoreContext storeContext; - /** * Instantiate. * @param owner owner class @@ -60,9 +58,9 @@ public class MagicCommitIntegration { */ public MagicCommitIntegration(S3AFileSystem owner, boolean magicCommitEnabled) { + super(owner.createStoreContext()); this.owner = owner; this.magicCommitEnabled = magicCommitEnabled; - this.storeContext = owner.createStoreContext(); } /** @@ -85,6 +83,9 @@ public String keyOfFinalDestination(List elements, String key) { * Given a path and a key to that same path, create a tracker for it. * This specific tracker will be chosen based on whether or not * the path is a magic one. + * Auditing: the span used to invoke + * this method will be the one used to create the write operation helper + * for the commit tracker. * @param path path of nominal write * @param key key of path of nominal write * @return the tracker for this operation. @@ -98,10 +99,10 @@ public PutTracker createTracker(Path path, String key) { if (isMagicCommitPath(elements)) { final String destKey = keyOfFinalDestination(elements, key); String pendingsetPath = key + CommitConstants.PENDING_SUFFIX; - storeContext.incrementStatistic( + getStoreContext().incrementStatistic( Statistic.COMMITTER_MAGIC_FILES_CREATED); tracker = new MagicCommitTracker(path, - storeContext.getBucket(), + getStoreContext().getBucket(), key, destKey, pendingsetPath, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java index c3a70bffac057..71af2bef24bd8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java @@ -65,7 +65,7 @@ public class MagicCommitTracker extends PutTracker { * @param originalDestKey the original key, in the magic directory. * @param destKey key for the destination * @param pendingsetKey key of the pendingset file - * @param writer writer instance to use for operations + * @param writer writer instance to use for operations; includes audit span */ public MagicCommitTracker(Path path, String bucket, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java index 59114f7ab7340..ea49988c101e8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java @@ -56,6 +56,7 @@ import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtilsWithMR.*; +import static org.apache.hadoop.util.functional.RemoteIterators.cleanupRemoteIterator; /** * Committer based on the contributed work of the @@ -828,6 +829,7 @@ protected PathExistsException failDestinationExists(final Path path, ? 
" dir" : ("file size " + status.getLen() + " bytes")); } + cleanupRemoteIterator(lf); } catch (IOException e) { LOG.info("Discarding exception raised when listing {}: " + e, path); LOG.debug("stack trace ", e); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java index 98b76b15da48f..cbca14660d35f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java @@ -18,24 +18,46 @@ package org.apache.hadoop.fs.s3a.impl; +import org.apache.hadoop.fs.store.audit.AuditSpan; + import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Base class of operations in the store. * An operation is something which executes against the context to * perform a single function. - * It is expected to have a limited lifespan. */ public abstract class AbstractStoreOperation { + /** + * Store context. + */ private final StoreContext storeContext; /** - * constructor. + * Audit Span. + */ + private AuditSpan auditSpan; + + /** + * Constructor. + * Picks up the active audit span from the store context and + * stores it for later. * @param storeContext store context. */ protected AbstractStoreOperation(final StoreContext storeContext) { + this(storeContext, storeContext.getActiveAuditSpan()); + } + + /** + * Constructor. + * @param storeContext store context. + * @param auditSpan active span + */ + protected AbstractStoreOperation(final StoreContext storeContext, + final AuditSpan auditSpan) { this.storeContext = checkNotNull(storeContext); + this.auditSpan = checkNotNull(auditSpan); } /** @@ -46,4 +68,18 @@ public final StoreContext getStoreContext() { return storeContext; } + /** + * Get the audit span this object was created with. + * @return the current span + */ + public AuditSpan getAuditSpan() { + return auditSpan; + } + + /** + * Activate the audit span. + */ + public void activateAuditSpan() { + auditSpan.activate(); + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java index b788f507735a0..259738f99892a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java @@ -32,6 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.util.DurationInfo; import static org.apache.hadoop.fs.impl.FutureIOSupport.raiseInnerCause; @@ -41,24 +42,48 @@ * raised by the callable and wrapping them as appropriate. * @param return type. */ -public final class CallableSupplier implements Supplier { +public final class CallableSupplier implements Supplier { private static final Logger LOG = LoggerFactory.getLogger(CallableSupplier.class); private final Callable call; + /** + * Audit Span; may be null. + */ + private final AuditSpan auditSpan; + /** * Create. * @param call call to invoke. */ public CallableSupplier(final Callable call) { + this(null, call); + } + + /** + * Create. + * @param auditSpan audit span (or null) + * @param call call to invoke. 
+ */ + public CallableSupplier( + final AuditSpan auditSpan, + final Callable call) { this.call = call; + this.auditSpan = auditSpan; } + /** + * Active any span and then call the supplied callable. + * @return the result. + */ @Override - public Object get() { + public T get() { try { + if (auditSpan != null) { + auditSpan.activate(); + } return call.call(); } catch (RuntimeException e) { throw e; @@ -86,11 +111,32 @@ public static CompletableFuture submit( return CompletableFuture.supplyAsync( new CallableSupplier(call), executor); } + /** + * Submit a callable into a completable future. + * RTEs are rethrown. + * Non RTEs are caught and wrapped; IOExceptions to + * {@code RuntimeIOException} instances. + * @param executor executor. + * @param auditSpan audit span (or null) + * @param call call to invoke + * @param type + * @return the future to wait for + */ + @SuppressWarnings("unchecked") + public static CompletableFuture submit( + final Executor executor, + final AuditSpan auditSpan, + final Callable call) { + return CompletableFuture.supplyAsync( + new CallableSupplier(auditSpan, call), + executor); + } /** * Wait for a list of futures to complete. If the list is empty, * return immediately. * @param futures list of futures. + * @param type * @throws IOException if one of the called futures raised an IOE. * @throws RuntimeException if one of the futures raised one. */ @@ -108,6 +154,7 @@ public static void waitForCompletion( /** * Wait for a single of future to complete, extracting IOEs afterwards. * @param future future to wait for. + * @param type * @throws IOException if one of the called futures raised an IOE. * @throws RuntimeException if one of the futures raised one. */ @@ -127,6 +174,7 @@ public static void waitForCompletion( /** * Wait for a single of future to complete, ignoring exceptions raised. * @param future future to wait for. 
+ * @param type */ public static void waitForCompletionIgnoringExceptions( @Nullable final CompletableFuture future) { diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java index 2d49cc939a41a..205826dadae6f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.S3ObjectAttributes; import org.apache.hadoop.fs.s3a.RemoteFileChangedException; +import org.apache.hadoop.fs.store.LogExactlyOnce; import static org.apache.hadoop.fs.s3a.Constants.*; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java index 27ac7dec1dd19..42bee714da52f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java @@ -22,10 +22,10 @@ import java.io.IOException; import java.nio.file.AccessDeniedException; -import com.amazonaws.services.s3.model.ObjectMetadata; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.store.audit.AuditSpan; /** * An interface to implement for providing accessors to @@ -85,13 +85,16 @@ public interface ContextAccessors { Path makeQualified(Path path); /** - * Retrieve the object metadata. - * - * @param key key to retrieve. - * @return metadata - * @throws IOException IO and object access problems. + * Return the active audit span. + * This is thread local -it MUST be picked up and passed into workers. + * Collect and cache the value during construction. + * @return active audit span. */ - @Retries.RetryTranslated - ObjectMetadata getObjectMetadata(String key) throws IOException; + AuditSpan getActiveAuditSpan(); + /** + * Get the request factory. + * @return the factory for requests. 
+ */ + RequestFactory getRequestFactory(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java index 2292179b3fd66..5db996763885b 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java @@ -46,6 +46,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DurationInfo; +import static org.apache.hadoop.fs.store.audit.AuditingFunctions.callableWithinAuditSpan; import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.maybeAwaitCompletion; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; @@ -504,13 +505,15 @@ private CompletableFuture submitDelete( return null; } filesDeleted += keyList.size(); - return submit(executor, () -> { - asyncDeleteAction(operationState, - keyList, - pathList, - LOG.isDebugEnabled()); - return null; - }); + return submit(executor, + callableWithinAuditSpan( + getAuditSpan(), () -> { + asyncDeleteAction(operationState, + keyList, + pathList, + LOG.isDebugEnabled()); + return null; + })); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java index ba8e743f293cb..fdaeb8ebba457 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java @@ -23,14 +23,21 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.util.functional.CallableRaisingIOE; + /** * A subclass of {@link AbstractStoreOperation} which * provides a method {@link #execute()} that may be invoked * exactly once. + * It declares itself a {@code CallableRaisingIOE} and + * can be handed straight to methods which take those + * as parameters. * @param return type of executed operation. */ public abstract class ExecutingStoreOperation - extends AbstractStoreOperation { + extends AbstractStoreOperation + implements CallableRaisingIOE { /** * Used to stop any re-entrancy of the rename. @@ -39,11 +46,34 @@ public abstract class ExecutingStoreOperation private final AtomicBoolean executed = new AtomicBoolean(false); /** - * constructor. + * Constructor. + * Picks up the active audit span from the store context and + * stores it for later. * @param storeContext store context. */ protected ExecutingStoreOperation(final StoreContext storeContext) { - super(storeContext); + this(storeContext, storeContext.getActiveAuditSpan()); + } + + /** + * Constructor. + * @param storeContext store context. + * @param auditSpan active span + */ + protected ExecutingStoreOperation( + final StoreContext storeContext, + final AuditSpan auditSpan) { + super(storeContext, auditSpan); + } + + /** + * Apply calls {@link #execute()}. + * @return the result. + * @throws IOException IO problem + */ + @Override + public final T apply() throws IOException { + return execute(); } /** @@ -53,17 +83,19 @@ protected ExecutingStoreOperation(final StoreContext storeContext) { * @return the result. 
* @throws IOException IO problem */ - public abstract T execute() throws IOException ; + public abstract T execute() throws IOException; /** * Check that the operation has not been invoked twice. * This is an atomic check. + * After the check: activates the span. * @throws IllegalStateException on a second invocation. */ protected void executeOnlyOnce() { Preconditions.checkState( !executed.getAndSet(true), "Operation attempted twice"); + activateAuditSpan(); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/GetContentSummaryOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/GetContentSummaryOperation.java new file mode 100644 index 0000000000000..23631c67e30a1 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/GetContentSummaryOperation.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; + +/** + * GetContentSummary operation. + * This is based on {@code FileSystem.get#getContentSummary}; + * its still doing sequential treewalk with the efficiency + * issues. + * + * Changes: + * 1. On the recursive calls there + * is no probe to see if the path is a file: we know the + * recursion only happens with a dir. + * 2. If a subdirectory is not found during the walk, that + * does not trigger an error. The directory is clearly + * not part of the content any more. + * + * The Operation serves up IOStatistics; this counts + * the cost of all the list operations, but not the + * initial HEAD probe to see if the path is a file. + */ +public class GetContentSummaryOperation extends + ExecutingStoreOperation implements IOStatisticsSource { + + private static final Logger LOG = LoggerFactory.getLogger( + GetContentSummaryOperation.class); + + /** + * Directory to scan. + */ + private final Path path; + + /** + * Callbacks to the store. + */ + private final GetContentSummaryCallbacks callbacks; + + /** + * IOStatistics to serve up. 
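+   * It aggregates the statistics of every directory listing made
+   * during the treewalk; the initial HEAD probe is not included.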
+ */ + private final IOStatisticsSnapshot iostatistics = + new IOStatisticsSnapshot(); + + /** + * Constructor. + * @param storeContext context. + * @param path path to summarize + * @param callbacks callbacks for S3 access. + */ + public GetContentSummaryOperation( + final StoreContext storeContext, + final Path path, + final GetContentSummaryCallbacks callbacks) { + super(storeContext); + this.path = path; + this.callbacks = callbacks; + } + + @Override + public IOStatistics getIOStatistics() { + return iostatistics; + } + + /** + * Return the {@link ContentSummary} of a given path. + * @return the summary. + * @throws FileNotFoundException if the path does not resolve + * @throws IOException failure + */ + @Override + @Retries.RetryTranslated + public ContentSummary execute() throws IOException { + FileStatus status = probePathStatusOrNull(path, StatusProbeEnum.FILE); + if (status != null && status.isFile()) { + // f is a file + long length = status.getLen(); + return new ContentSummary.Builder().length(length). + fileCount(1).directoryCount(0).spaceConsumed(length).build(); + } + final ContentSummary summary = getDirSummary(path); + // Log the IOStatistics at debug so the cost of the operation + // can be made visible. + LOG.debug("IOStatistics of getContentSummary({}):\n{}", path, iostatistics); + return summary; + } + + /** + * Return the {@link ContentSummary} of a given directory. + * This is a recursive operation (as the original is); + * it'd be more efficient of stack and heap if it managed its + * own stack. + * @param dir dir to scan + * @throws FileNotFoundException if the path does not resolve + * @throws IOException IO failure + * @return the content summary + * @throws FileNotFoundException the path does not exist + * @throws IOException failure + */ + public ContentSummary getDirSummary(Path dir) throws IOException { + long totalLength = 0; + long fileCount = 0; + long dirCount = 1; + final RemoteIterator it + = callbacks.listStatusIterator(dir); + + while (it.hasNext()) { + final S3AFileStatus s = it.next(); + if (s.isDirectory()) { + try { + ContentSummary c = getDirSummary(s.getPath()); + totalLength += c.getLength(); + fileCount += c.getFileCount(); + dirCount += c.getDirectoryCount(); + } catch (FileNotFoundException ignored) { + // path was deleted during the scan; exclude from + // summary. + } + } else { + totalLength += s.getLen(); + fileCount += 1; + } + } + // Add the list's IOStatistics + iostatistics.aggregate(retrieveIOStatistics(it)); + return new ContentSummary.Builder().length(totalLength). + fileCount(fileCount).directoryCount(dirCount). + spaceConsumed(totalLength).build(); + } + + /** + * Get the status of a path, downgrading FNFE to null result. + * @param p path to probe. + * @param probes probes to exec + * @return the status or null + * @throws IOException failure other than FileNotFound + */ + private S3AFileStatus probePathStatusOrNull(final Path p, + final Set probes) throws IOException { + try { + return callbacks.probePathStatus(p, probes); + } catch (FileNotFoundException fnfe) { + return null; + } + } + + /** + * Callbacks used by the operation. + */ + public interface GetContentSummaryCallbacks { + + /** + * Get the status of a path. + * @param path path to probe. + * @param probes probes to exec + * @return the status + * @throws IOException failure + */ + @Retries.RetryTranslated + S3AFileStatus probePathStatus(Path path, + Set probes) throws IOException; + + /** + * Incremental list of all entries in a directory. 
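+     * If the returned iterator also exposes IOStatistics, those
+     * statistics are aggregated into the operation's own statistics.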
+ * @param path path of dir + * @return an iterator + * @throws IOException failure + */ + RemoteIterator listStatusIterator(Path path) + throws IOException; + + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java index 5efec2b36dafe..8c39aa4632d37 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java @@ -35,6 +35,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Retries; import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; @@ -188,6 +189,15 @@ public class HeaderProcessing extends AbstractStoreOperation { public static final String XA_STORAGE_CLASS = XA_HEADER_PREFIX + Headers.STORAGE_CLASS; + /** + * HTTP Referrer for logs: {@value}. + * This can be found in S3 logs, but is not set as + * an attribute in objects. + * important: the header value is deliberately + * a mis-spelling, as that is defined in RFC-1945. + */ + public static final String HEADER_REFERRER = "Referer"; + /** * Standard headers which are retrieved from HEAD Requests * and set as XAttrs if the response included the relevant header. @@ -230,12 +240,23 @@ public class HeaderProcessing extends AbstractStoreOperation { public static final String CONTENT_TYPE_APPLICATION_XML = "application/xml"; + /** + * Directory content type : {@value}. + * Matches use/expectations of AWS S3 console. + */ + public static final String CONTENT_TYPE_X_DIRECTORY = + "application/x-directory"; + + private final HeaderProcessingCallbacks callbacks; /** * Construct. * @param storeContext store context. + * @param callbacks callbacks to the store */ - public HeaderProcessing(final StoreContext storeContext) { + public HeaderProcessing(final StoreContext storeContext, + final HeaderProcessingCallbacks callbacks) { super(storeContext); + this.callbacks = callbacks; } /** @@ -253,18 +274,17 @@ private Map retrieveHeaders( final Path path, final Statistic statistic) throws IOException { StoreContext context = getStoreContext(); - ContextAccessors accessors = context.getContextAccessors(); - String objectKey = accessors.pathToKey(path); + String objectKey = context.pathToKey(path); ObjectMetadata md; String symbol = statistic.getSymbol(); S3AStatisticsContext instrumentation = context.getInstrumentation(); try { md = trackDuration(instrumentation, symbol, () -> - accessors.getObjectMetadata(objectKey)); + callbacks.getObjectMetadata(objectKey)); } catch (FileNotFoundException e) { // no entry. It could be a directory, so try again. md = trackDuration(instrumentation, symbol, () -> - accessors.getObjectMetadata(objectKey + "/")); + callbacks.getObjectMetadata(objectKey + "/")); } // all user metadata Map rawHeaders = md.getUserMetadata(); @@ -443,7 +463,7 @@ public static Optional extractXAttrLongValue(byte[] data) { * @param source the {@link ObjectMetadata} to copy * @param dest the metadata to update; this is the return value. 
*/ - public void cloneObjectMetadata(ObjectMetadata source, + public static void cloneObjectMetadata(ObjectMetadata source, ObjectMetadata dest) { // Possibly null attributes @@ -497,4 +517,16 @@ public void cloneObjectMetadata(ObjectMetadata source, .forEach(e -> dest.addUserMetadata(e.getKey(), e.getValue())); } + public interface HeaderProcessingCallbacks { + + /** + * Retrieve the object metadata. + * + * @param key key to retrieve. + * @return metadata + * @throws IOException IO and object access problems. + */ + @Retries.RetryTranslated + ObjectMetadata getObjectMetadata(String key) throws IOException; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java index a5ce1f68ad3fc..d6142f49c94a2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java @@ -37,6 +37,16 @@ */ public final class InternalConstants { + /** + * This declared delete as idempotent. + * This is an "interesting" topic in past Hadoop FS work. + * Essentially: with a single caller, DELETE is idempotent + * but in a shared filesystem, it is is very much not so. + * Here, on the basis that isn't a filesystem with consistency guarantees, + * retryable results in files being deleted. + */ + public static final boolean DELETE_CONSIDERED_IDEMPOTENT = true; + private InternalConstants() { } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java index b0ebf1ca16975..99fb6809d93a3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.s3a.S3ListResult; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.store.audit.AuditSpan; /** * These are all the callbacks which @@ -46,13 +47,15 @@ public interface ListingOperationCallbacks { * Retry policy: retry untranslated. * @param request request to initiate * @param trackerFactory tracker with statistics to update + * @param span audit span for this operation * @return the results * @throws IOException if the retry invocation raises one (it shouldn't). */ @Retries.RetryRaw CompletableFuture listObjectsAsync( - S3ListRequest request, - DurationTrackerFactory trackerFactory) + S3ListRequest request, + DurationTrackerFactory trackerFactory, + AuditSpan span) throws IOException; /** @@ -61,14 +64,16 @@ CompletableFuture listObjectsAsync( * @param request last list objects request to continue * @param prevResult last paged result to continue from * @param trackerFactory tracker with statistics to update + * @param span audit span for the IO * @return the next result object * @throws IOException none, just there for retryUntranslated. 
*/ @Retries.RetryRaw CompletableFuture continueListObjectsAsync( - S3ListRequest request, - S3ListResult prevResult, - DurationTrackerFactory trackerFactory) + S3ListRequest request, + S3ListResult prevResult, + DurationTrackerFactory trackerFactory, + AuditSpan span) throws IOException; /** @@ -82,16 +87,19 @@ S3ALocatedFileStatus toLocatedFileStatus( throws IOException; /** * Create a {@code ListObjectsRequest} request against this bucket, - * with the maximum keys returned in a query set by + * with the maximum keys returned in a query set in the FS config. + * The active span for the FS is handed the request to prepare it + * before this method returns. * {@link #getMaxKeys()}. * @param key key for request * @param delimiter any delimiter + * @param span span within which the request takes place. * @return the request */ S3ListRequest createListObjectsRequest( - String key, - String delimiter); - + String key, + String delimiter, + AuditSpan span); /** * Return the number of bytes that large input files should be optimally diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java new file mode 100644 index 0000000000000..1d824201abeae --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.AccessDeniedException; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AFileStatus; + +/** + * The mkdir operation. + * A walk up the ancestor list halting as soon as a directory (good) + * or file (bad) is found. + * Optimized with the expectation that there is a marker up the path + * or (ultimately) a sibling of the path being created. + * It performs the directory listing probe ahead of the simple object HEAD + * call for this reason -the object is the failure mode which SHOULD NOT + * be encountered on normal execution. 
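+ * An illustrative probe trace for {@code mkdirs(/a/b/c)} when only
+ * {@code /a} exists (a sketch; the real requests are issued through
+ * the callbacks):
+ * <pre>
+ *   LIST /a/b/c/ ; HEAD /a/b/c   (nothing found)
+ *   LIST /a/b/   ; HEAD /a/b     (nothing found)
+ *   LIST /a/     -&gt; directory found; PUT marker /a/b/c/
+ * </pre>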
+ */ +public class MkdirOperation extends ExecutingStoreOperation { + + private static final Logger LOG = LoggerFactory.getLogger( + MkdirOperation.class); + + private final Path dir; + + private final MkdirCallbacks callbacks; + + public MkdirOperation( + final StoreContext storeContext, + final Path dir, + final MkdirCallbacks callbacks) { + super(storeContext); + this.dir = dir; + this.callbacks = callbacks; + } + + /** + * + * Make the given path and all non-existent parents into + * directories. + * @return true if a directory was created or already existed + * @throws FileAlreadyExistsException there is a file at the path specified + * @throws IOException other IO problems + */ + @Override + @Retries.RetryTranslated + public Boolean execute() throws IOException { + LOG.debug("Making directory: {}", dir); + if (dir.isRoot()) { + // fast exit for root. + return true; + } + + FileStatus fileStatus = getPathStatusExpectingDir(dir); + if (fileStatus != null) { + if (fileStatus.isDirectory()) { + return true; + } else { + throw new FileAlreadyExistsException("Path is a file: " + dir); + } + } + // dir, walk up tree + // Walk path to root, ensuring closest ancestor is a directory, not file + Path fPart = dir.getParent(); + try { + while (fPart != null && !fPart.isRoot()) { + fileStatus = getPathStatusExpectingDir(fPart); + if (fileStatus == null) { + // nothing at this path, so validate the parent + fPart = fPart.getParent(); + continue; + } + if (fileStatus.isDirectory()) { + // the parent dir exists. All is good. + break; + } + + // there's a file at the parent entry + throw new FileAlreadyExistsException(String.format( + "Can't make directory for path '%s' since it is a file.", + fPart)); + } + } catch (AccessDeniedException e) { + LOG.info("mkdirs({}}: Access denied when looking" + + " for parent directory {}; skipping checks", + dir, fPart); + LOG.debug("{}", e.toString(), e); + } + + // if we get here there is no directory at the destination. + // so create one. + String key = getStoreContext().pathToKey(dir); + // this will create the marker file, delete the parent entries + // and update S3Guard + callbacks.createFakeDirectory(key); + return true; + } + + /** + * Get the status of a path, downgrading FNFE to null result. + * @param path path to probe. + * @param probes probes to exec + * @return the status or null + * @throws IOException failure other than FileNotFound + */ + private S3AFileStatus probePathStatusOrNull(final Path path, + final Set probes) throws IOException { + try { + return callbacks.probePathStatus(path, probes); + } catch (FileNotFoundException fnfe) { + return null; + } + } + + /** + * Get the status of a path -optimized for paths + * where there is a directory marker or child entries. + * @param path path to probe. + * @return the status + * @throws IOException failure + */ + private S3AFileStatus getPathStatusExpectingDir(final Path path) + throws IOException { + S3AFileStatus status = probePathStatusOrNull(path, + StatusProbeEnum.DIRECTORIES); + if (status == null) { + status = probePathStatusOrNull(path, + StatusProbeEnum.FILE); + } + return status; + } + + /** + * Callbacks used by mkdir. + */ + public interface MkdirCallbacks { + + /** + * Get the status of a path. + * @param path path to probe. + * @param probes probes to exec + * @return the status + * @throws IOException failure + */ + @Retries.RetryTranslated + S3AFileStatus probePathStatus(Path path, + Set probes) throws IOException; + + /** + * Create a fake directory, always ending in "/". 
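+     * (In the S3A store this is expected to be the PUT of an empty
+     * directory-marker object, but that is the callback's concern.)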
+ * Retry policy: retrying; translated. + * @param key name of directory object. + * @throws IOException IO failure + */ + @Retries.RetryTranslated + void createFakeDirectory(String key) throws IOException; + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java index 5890ac01a9e74..7b13d0d3c7c42 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java @@ -46,6 +46,7 @@ import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.OperationDuration; +import static org.apache.hadoop.fs.store.audit.AuditingFunctions.callableWithinAuditSpan; import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_BLOCK_SIZE; import static org.apache.hadoop.fs.s3a.S3AUtils.objectRepresentsDirectory; @@ -381,7 +382,7 @@ protected Path renameFileToDest() throws IOException { * Execute a full recursive rename. * There is a special handling of directly markers here -only leaf markers * are copied. This reduces incompatibility "regions" across versions. -Are * @throws IOException failure + * @throws IOException failure */ protected void recursiveDirectoryRename() throws IOException { final StoreContext storeContext = getStoreContext(); @@ -596,15 +597,16 @@ protected CompletableFuture initiateCopy( source.getVersionId(), source.getLen()); // queue the copy operation for execution in the thread pool - return submit(getStoreContext().getExecutor(), () -> - copySourceAndUpdateTracker( - childSourcePath, - key, - sourceAttributes, - callbacks.createReadContext(source), - childDestPath, - newDestKey, - true)); + return submit(getStoreContext().getExecutor(), + callableWithinAuditSpan(getAuditSpan(), () -> + copySourceAndUpdateTracker( + childSourcePath, + key, + sourceAttributes, + callbacks.createReadContext(source), + childDestPath, + newDestKey, + true))); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java new file mode 100644 index 0000000000000..f9ff08a5f6542 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java @@ -0,0 +1,695 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CannedAccessControlList; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CopyObjectRequest; +import com.amazonaws.services.s3.model.DeleteObjectRequest; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsV2Request; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; +import com.amazonaws.services.s3.model.SSECustomerKey; +import com.amazonaws.services.s3.model.SelectObjectContentRequest; +import com.amazonaws.services.s3.model.UploadPartRequest; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecretOperations; +import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; + +import static org.apache.commons.lang3.StringUtils.isNotEmpty; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; + +/** + * The standard implementation of the request factory. + * This creates AWS SDK request classes for the specific bucket, + * with standard options/headers set. + * It is also where custom setting parameters can take place. + * + * All creation of AWS S3 requests MUST be through this class so that + * common options (encryption etc.) can be added here, + * and so that any chained transformation of requests can be applied. + * + * This is where audit span information is added to the requests, + * until it is done in the AWS SDK itself. + * + * All created requests will be passed through + * {@link PrepareRequest#prepareRequest(AmazonWebServiceRequest)} before + * being returned to the caller. + */ +public class RequestFactoryImpl implements RequestFactory { + + public static final Logger LOG = LoggerFactory.getLogger( + RequestFactoryImpl.class); + + /** + * Target bucket. + */ + private final String bucket; + + /** + * Encryption secrets. + */ + private EncryptionSecrets encryptionSecrets; + + /** + * ACL For new objects. 
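+   * May be null, in which case no canned ACL is attached to requests.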
+ */ + private final CannedAccessControlList cannedACL; + + /** + * Max number of multipart entries allowed in a large + * upload. Tunable for testing only. + */ + private final long multipartPartCountLimit; + + /** + * Requester Pays. + * This is to be wired up in a PR with its + * own tests and docs. + */ + private final boolean requesterPays; + + /** + * Callback to prepare requests. + */ + private final PrepareRequest requestPreparer; + + /** + * Constructor. + * @param builder builder with all the configuration. + */ + protected RequestFactoryImpl( + final RequestFactoryBuilder builder) { + this.bucket = builder.bucket; + this.cannedACL = builder.cannedACL; + this.encryptionSecrets = builder.encryptionSecrets; + this.multipartPartCountLimit = builder.multipartPartCountLimit; + this.requesterPays = builder.requesterPays; + this.requestPreparer = builder.requestPreparer; + } + + /** + * Preflight preparation of AWS request. + * @param web service request + * @return prepared entry. + */ + @Retries.OnceRaw + private T prepareRequest(T t) { + return requestPreparer != null + ? requestPreparer.prepareRequest(t) + : t; + } + + /** + * Get the canned ACL of this FS. + * @return an ACL, if any + */ + @Override + public CannedAccessControlList getCannedACL() { + return cannedACL; + } + + /** + * Get the target bucket. + * @return the bucket. + */ + protected String getBucket() { + return bucket; + } + + /** + * Create the AWS SDK structure used to configure SSE, + * if the encryption secrets contain the information/settings for this. + * @return an optional set of KMS Key settings + */ + @Override + public Optional generateSSEAwsKeyParams() { + return EncryptionSecretOperations.createSSEAwsKeyManagementParams( + encryptionSecrets); + } + + /** + * Create the SSE-C structure for the AWS SDK, if the encryption secrets + * contain the information/settings for this. + * This will contain a secret extracted from the bucket/configuration. + * @return an optional customer key. + */ + @Override + public Optional generateSSECustomerKey() { + return EncryptionSecretOperations.createSSECustomerKey( + encryptionSecrets); + } + + /** + * Get the encryption algorithm of this endpoint. + * @return the encryption algorithm. + */ + @Override + public S3AEncryptionMethods getServerSideEncryptionAlgorithm() { + return encryptionSecrets.getEncryptionMethod(); + } + + /** + * Sets server side encryption parameters to the part upload + * request when encryption is enabled. + * @param request upload part request + */ + protected void setOptionalUploadPartRequestParameters( + UploadPartRequest request) { + generateSSECustomerKey().ifPresent(request::setSSECustomerKey); + } + + /** + * Sets server side encryption parameters to the GET reuquest. + * request when encryption is enabled. + * @param request upload part request + */ + protected void setOptionalGetObjectMetadataParameters( + GetObjectMetadataRequest request) { + generateSSECustomerKey().ifPresent(request::setSSECustomerKey); + } + + /** + * Set the optional parameters when initiating the request (encryption, + * headers, storage, etc). + * @param request request to patch. + */ + protected void setOptionalMultipartUploadRequestParameters( + InitiateMultipartUploadRequest request) { + generateSSEAwsKeyParams().ifPresent(request::setSSEAwsKeyManagementParams); + generateSSECustomerKey().ifPresent(request::setSSECustomerKey); + } + + /** + * Set the optional parameters for a PUT request. + * @param request request to patch. 
+ */ + protected void setOptionalPutRequestParameters(PutObjectRequest request) { + generateSSEAwsKeyParams().ifPresent(request::setSSEAwsKeyManagementParams); + generateSSECustomerKey().ifPresent(request::setSSECustomerKey); + } + + /** + * Set the optional metadata for an object being created or copied. + * @param metadata to update. + */ + protected void setOptionalObjectMetadata(ObjectMetadata metadata) { + final S3AEncryptionMethods algorithm + = getServerSideEncryptionAlgorithm(); + if (S3AEncryptionMethods.SSE_S3 == algorithm) { + metadata.setSSEAlgorithm(algorithm.getMethod()); + } + } + + /** + * Create a new object metadata instance. + * Any standard metadata headers are added here, for example: + * encryption. + * + * @param length length of data to set in header; Ignored if negative + * @return a new metadata instance + */ + @Override + public ObjectMetadata newObjectMetadata(long length) { + final ObjectMetadata om = new ObjectMetadata(); + setOptionalObjectMetadata(om); + if (length >= 0) { + om.setContentLength(length); + } + return om; + } + + @Override + public CopyObjectRequest newCopyObjectRequest(String srcKey, + String dstKey, + ObjectMetadata srcom) { + CopyObjectRequest copyObjectRequest = + new CopyObjectRequest(getBucket(), srcKey, getBucket(), dstKey); + ObjectMetadata dstom = newObjectMetadata(srcom.getContentLength()); + HeaderProcessing.cloneObjectMetadata(srcom, dstom); + setOptionalObjectMetadata(dstom); + copyEncryptionParameters(srcom, copyObjectRequest); + copyObjectRequest.setCannedAccessControlList(cannedACL); + copyObjectRequest.setNewObjectMetadata(dstom); + Optional.ofNullable(srcom.getStorageClass()) + .ifPresent(copyObjectRequest::setStorageClass); + return prepareRequest(copyObjectRequest); + } + + /** + * Propagate encryption parameters from source file if set else use the + * current filesystem encryption settings. + * @param srcom source object metadata. + * @param copyObjectRequest copy object request body. + */ + protected void copyEncryptionParameters( + ObjectMetadata srcom, + CopyObjectRequest copyObjectRequest) { + String sourceKMSId = srcom.getSSEAwsKmsKeyId(); + if (isNotEmpty(sourceKMSId)) { + // source KMS ID is propagated + LOG.debug("Propagating SSE-KMS settings from source {}", + sourceKMSId); + copyObjectRequest.setSSEAwsKeyManagementParams( + new SSEAwsKeyManagementParams(sourceKMSId)); + } + switch (getServerSideEncryptionAlgorithm()) { + case SSE_S3: + /* no-op; this is set in destination object metadata */ + break; + + case SSE_C: + generateSSECustomerKey().ifPresent(customerKey -> { + copyObjectRequest.setSourceSSECustomerKey(customerKey); + copyObjectRequest.setDestinationSSECustomerKey(customerKey); + }); + break; + + case SSE_KMS: + generateSSEAwsKeyParams().ifPresent( + copyObjectRequest::setSSEAwsKeyManagementParams); + break; + default: + } + } + /** + * Create a putObject request. + * Adds the ACL and metadata + * @param key key of object + * @param metadata metadata header + * @param srcfile source file + * @return the request + */ + @Override + public PutObjectRequest newPutObjectRequest(String key, + ObjectMetadata metadata, File srcfile) { + Preconditions.checkNotNull(srcfile); + PutObjectRequest putObjectRequest = new PutObjectRequest(getBucket(), key, + srcfile); + setOptionalPutRequestParameters(putObjectRequest); + putObjectRequest.setCannedAcl(cannedACL); + putObjectRequest.setMetadata(metadata); + return prepareRequest(putObjectRequest); + } + + /** + * Create a {@link PutObjectRequest} request. 
+ * The metadata is assumed to have been configured with the size of the + * operation. + * @param key key of object + * @param metadata metadata header + * @param inputStream source data. + * @return the request + */ + @Override + public PutObjectRequest newPutObjectRequest(String key, + ObjectMetadata metadata, + InputStream inputStream) { + Preconditions.checkNotNull(inputStream); + Preconditions.checkArgument(isNotEmpty(key), "Null/empty key"); + PutObjectRequest putObjectRequest = new PutObjectRequest(getBucket(), key, + inputStream, metadata); + setOptionalPutRequestParameters(putObjectRequest); + putObjectRequest.setCannedAcl(cannedACL); + return prepareRequest(putObjectRequest); + } + + @Override + public PutObjectRequest newDirectoryMarkerRequest(String directory) { + String key = directory.endsWith("/") + ? directory + : (directory + "/"); + // an input stream which is laways empty + final InputStream im = new InputStream() { + @Override + public int read() throws IOException { + return -1; + } + }; + // preparation happens in here + final ObjectMetadata md = newObjectMetadata(0L); + md.setContentType(HeaderProcessing.CONTENT_TYPE_X_DIRECTORY); + PutObjectRequest putObjectRequest = + newPutObjectRequest(key, md, im); + return putObjectRequest; + } + + @Override + public ListMultipartUploadsRequest + newListMultipartUploadsRequest(String prefix) { + ListMultipartUploadsRequest request = new ListMultipartUploadsRequest( + getBucket()); + if (prefix != null) { + request.setPrefix(prefix); + } + return prepareRequest(request); + } + + @Override + public AbortMultipartUploadRequest newAbortMultipartUploadRequest( + String destKey, + String uploadId) { + return prepareRequest(new AbortMultipartUploadRequest(getBucket(), + destKey, + uploadId)); + } + + @Override + public InitiateMultipartUploadRequest newMultipartUploadRequest( + String destKey) { + final InitiateMultipartUploadRequest initiateMPURequest = + new InitiateMultipartUploadRequest(getBucket(), + destKey, + newObjectMetadata(-1)); + initiateMPURequest.setCannedACL(getCannedACL()); + setOptionalMultipartUploadRequestParameters(initiateMPURequest); + return prepareRequest(initiateMPURequest); + } + + @Override + public CompleteMultipartUploadRequest newCompleteMultipartUploadRequest( + String destKey, + String uploadId, + List partETags) { + // a copy of the list is required, so that the AWS SDK doesn't + // attempt to sort an unmodifiable list. 
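+    // (the SDK sorts the part list in place; sorting an unmodifiable or
+    // immutable list would fail with an UnsupportedOperationException)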
+ return prepareRequest(new CompleteMultipartUploadRequest(bucket, + destKey, uploadId, new ArrayList<>(partETags))); + + } + + @Override + public GetObjectMetadataRequest newGetObjectMetadataRequest(String key) { + GetObjectMetadataRequest request = + new GetObjectMetadataRequest(getBucket(), key); + //SSE-C requires to be filled in if enabled for object metadata + setOptionalGetObjectMetadataParameters(request); + return prepareRequest(request); + } + + @Override + public GetObjectRequest newGetObjectRequest(String key) { + GetObjectRequest request = new GetObjectRequest(bucket, key); + generateSSECustomerKey().ifPresent(request::setSSECustomerKey); + + return prepareRequest(request); + } + + @Override + public UploadPartRequest newUploadPartRequest( + String destKey, + String uploadId, + int partNumber, + int size, + InputStream uploadStream, + File sourceFile, + long offset) throws PathIOException { + checkNotNull(uploadId); + // exactly one source must be set; xor verifies this + checkArgument((uploadStream != null) ^ (sourceFile != null), + "Data source"); + checkArgument(size >= 0, "Invalid partition size %s", size); + checkArgument(partNumber > 0, + "partNumber must be between 1 and %s inclusive, but is %s", + DEFAULT_UPLOAD_PART_COUNT_LIMIT, partNumber); + + LOG.debug("Creating part upload request for {} #{} size {}", + uploadId, partNumber, size); + final String pathErrorMsg = "Number of parts in multipart upload exceeded." + + " Current part count = %s, Part count limit = %s "; + if (partNumber > multipartPartCountLimit) { + throw new PathIOException(destKey, + String.format(pathErrorMsg, partNumber, multipartPartCountLimit)); + } + UploadPartRequest request = new UploadPartRequest() + .withBucketName(getBucket()) + .withKey(destKey) + .withUploadId(uploadId) + .withPartNumber(partNumber) + .withPartSize(size); + if (uploadStream != null) { + // there's an upload stream. Bind to it. 
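+      // (when a source file is supplied instead, the else branch below
+      // binds the file and its offset)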
+ request.setInputStream(uploadStream); + } else { + checkArgument(sourceFile.exists(), + "Source file does not exist: %s", sourceFile); + checkArgument(sourceFile.isFile(), + "Source is not a file: %s", sourceFile); + checkArgument(offset >= 0, "Invalid offset %s", offset); + long length = sourceFile.length(); + checkArgument(offset == 0 || offset < length, + "Offset %s beyond length of file %s", offset, length); + request.setFile(sourceFile); + request.setFileOffset(offset); + } + setOptionalUploadPartRequestParameters(request); + return prepareRequest(request); + } + + @Override + public SelectObjectContentRequest newSelectRequest(String key) { + SelectObjectContentRequest request = new SelectObjectContentRequest(); + request.setBucketName(bucket); + request.setKey(key); + generateSSECustomerKey().ifPresent(request::setSSECustomerKey); + return prepareRequest(request); + } + + @Override + public ListObjectsRequest newListObjectsV1Request( + final String key, + final String delimiter, + final int maxKeys) { + ListObjectsRequest request = new ListObjectsRequest() + .withBucketName(bucket) + .withMaxKeys(maxKeys) + .withPrefix(key); + if (delimiter != null) { + request.setDelimiter(delimiter); + } + return prepareRequest(request); + } + + @Override + public ListNextBatchOfObjectsRequest newListNextBatchOfObjectsRequest( + ObjectListing prev) { + return prepareRequest(new ListNextBatchOfObjectsRequest(prev)); + } + + @Override + public ListObjectsV2Request newListObjectsV2Request( + final String key, + final String delimiter, + final int maxKeys) { + final ListObjectsV2Request request = new ListObjectsV2Request() + .withBucketName(bucket) + .withMaxKeys(maxKeys) + .withPrefix(key); + if (delimiter != null) { + request.setDelimiter(delimiter); + } + return prepareRequest(request); + } + + @Override + public DeleteObjectRequest newDeleteObjectRequest(String key) { + return prepareRequest(new DeleteObjectRequest(bucket, key)); + } + + @Override + public DeleteObjectsRequest newBulkDeleteRequest( + List keysToDelete, + boolean quiet) { + return prepareRequest( + new DeleteObjectsRequest(bucket) + .withKeys(keysToDelete) + .withQuiet(quiet)); + } + + @Override + public void setEncryptionSecrets(final EncryptionSecrets secrets) { + encryptionSecrets = secrets; + } + + /** + * Create a builder. + * @return new builder. + */ + public static RequestFactoryBuilder builder() { + return new RequestFactoryBuilder(); + } + + /** + * Builder. + */ + public static final class RequestFactoryBuilder { + + /** + * Target bucket. + */ + private String bucket; + + /** + * Encryption secrets. + */ + private EncryptionSecrets encryptionSecrets = new EncryptionSecrets(); + + /** + * ACL For new objects. + */ + private CannedAccessControlList cannedACL = null; + + /** Requester Pays flag. */ + private boolean requesterPays = false; + + /** + * Multipart limit. + */ + private long multipartPartCountLimit = DEFAULT_UPLOAD_PART_COUNT_LIMIT; + + /** + * Callback to prepare requests. + */ + private PrepareRequest requestPreparer; + + private RequestFactoryBuilder() { + } + + /** + * Build the request factory. + * @return the factory + */ + public RequestFactory build() { + return new RequestFactoryImpl(this); + } + + /** + * Target bucket. + * @param value new value + * @return the builder + */ + public RequestFactoryBuilder withBucket(final String value) { + bucket = value; + return this; + } + + /** + * Encryption secrets. 
+ * @param value new value + * @return the builder + */ + public RequestFactoryBuilder withEncryptionSecrets( + final EncryptionSecrets value) { + encryptionSecrets = value; + return this; + } + + /** + * ACL For new objects. + * @param value new value + * @return the builder + */ + public RequestFactoryBuilder withCannedACL( + final CannedAccessControlList value) { + cannedACL = value; + return this; + } + + /** + * Requester Pays flag. + * @param value new value + * @return the builder + */ + public RequestFactoryBuilder withRequesterPays( + final boolean value) { + requesterPays = value; + return this; + } + + /** + * Multipart limit. + * @param value new value + * @return the builder + */ + public RequestFactoryBuilder withMultipartPartCountLimit( + final long value) { + multipartPartCountLimit = value; + return this; + } + + /** + * Callback to prepare requests. + * + * @param value new value + * @return the builder + */ + public RequestFactoryBuilder withRequestPreparer( + final PrepareRequest value) { + this.requestPreparer = value; + return this; + } + } + + /** + * This is a callback for anything to "prepare" every request + * after creation. The S3AFileSystem's Audit Manager is expected + * to be wired up via this call so can audit/prepare requests + * after their creation. + */ + @FunctionalInterface + public interface PrepareRequest { + + /** + * Post-creation preparation of AWS request. + * @param t request + * @param request type. + * @return prepared entry. + */ + @Retries.OnceRaw + T prepareRequest(T t); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java index 88231d8af9c04..ac29780dcbaec 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -32,6 +32,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AInputPolicy; @@ -40,6 +42,9 @@ import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.store.audit.ActiveThreadSpanSource; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; @@ -59,7 +64,7 @@ */ @InterfaceAudience.LimitedPrivate("S3A Filesystem and extensions") @InterfaceStability.Unstable -public class StoreContext { +public class StoreContext implements ActiveThreadSpanSource { /** Filesystem URI. */ private final URI fsURI; @@ -117,13 +122,17 @@ public class StoreContext { /** * Source of time. */ - private ITtlTimeProvider timeProvider; + + /** Time source for S3Guard TTLs. */ + private final ITtlTimeProvider timeProvider; + + /** Operation Auditor. */ + private final AuditSpanSource auditor; /** * Instantiate. - * @deprecated as public method: use {@link StoreContextBuilder}. 
*/ - public StoreContext( + StoreContext( final URI fsURI, final String bucket, final Configuration configuration, @@ -140,13 +149,17 @@ public StoreContext( final MetadataStore metadataStore, final boolean useListV1, final ContextAccessors contextAccessors, - final ITtlTimeProvider timeProvider) { + final ITtlTimeProvider timeProvider, + final AuditSpanSource auditor) { this.fsURI = fsURI; this.bucket = bucket; this.configuration = configuration; this.username = username; this.owner = owner; - this.executor = MoreExecutors.listeningDecorator(executor); + // some mock tests have a null executor pool + this.executor = executor !=null + ? MoreExecutors.listeningDecorator(executor) + : null; this.executorCapacity = executorCapacity; this.invoker = invoker; this.instrumentation = instrumentation; @@ -158,11 +171,7 @@ public StoreContext( this.useListV1 = useListV1; this.contextAccessors = contextAccessors; this.timeProvider = timeProvider; - } - - @Override - protected Object clone() throws CloneNotSupportedException { - return super.clone(); + this.auditor = auditor; } public URI getFsURI() { @@ -391,4 +400,33 @@ public CompletableFuture submit( LambdaUtils.eval(future, call)); return future; } + + /** + * Get the auditor. + * @return auditor. + */ + public AuditSpanSource getAuditor() { + return auditor; + } + + /** + * Return the active audit span. + * This is thread local -it MUST be passed into workers. + * To ensure the correct span is used, it SHOULD be + * collected as early as possible, ideally during construction/ + * or service init/start. + * @return active audit span. + */ + @Override + public AuditSpan getActiveAuditSpan() { + return contextAccessors.getActiveAuditSpan(); + } + + /** + * Get the request factory. + * @return the factory for requests. + */ + public RequestFactory getRequestFactory() { + return contextAccessors.getRequestFactory(); + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java index 13953f9c985f3..468af1bb77c8b 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java @@ -25,9 +25,11 @@ import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AInputPolicy; import org.apache.hadoop.fs.s3a.S3AStorageStatistics; +import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.security.UserGroupInformation; /** @@ -69,6 +71,8 @@ public class StoreContextBuilder { private ITtlTimeProvider timeProvider; + private AuditSpanSource auditor; + public StoreContextBuilder setFsURI(final URI fsURI) { this.fsURI = fsURI; return this; @@ -165,6 +169,17 @@ public StoreContextBuilder setTimeProvider( return this; } + /** + * Set builder value. 
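+   * The auditor is the span source used to create audit spans for
+   * store operations.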
+ * @param value new value + * @return the builder + */ + public StoreContextBuilder setAuditor( + final AuditSpanSource value) { + auditor = value; + return this; + } + @SuppressWarnings("deprecation") public StoreContext build() { return new StoreContext(fsURI, @@ -183,6 +198,7 @@ public StoreContext build() { metadataStore, useListV1, contextAccessors, - timeProvider); + timeProvider, + auditor); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java index 20bd250da0c38..2a7cb4c1b7247 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java @@ -52,6 +52,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; import org.apache.hadoop.fs.s3a.S3ListRequest; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.service.Service; import org.apache.hadoop.service.launcher.LauncherExitCodes; import org.apache.hadoop.service.launcher.ServiceLaunchException; @@ -347,21 +348,26 @@ protected long listStatusFilesystem( protected long dumpRawS3ObjectStore( final CsvFile csv) throws IOException { S3AFileSystem fs = getFilesystem(); - Path rootPath = fs.qualify(new Path("/")); - Listing listing = fs.getListing(); - S3ListRequest request = listing.createListObjectsRequest("", null); long count = 0; - RemoteIterator st = - listing.createFileStatusListingIterator(rootPath, request, - ACCEPT_ALL, - new Listing.AcceptAllButSelfAndS3nDirs(rootPath)); - while (st.hasNext()) { - count++; - S3AFileStatus next = st.next(); - LOG.debug("[{}] {}", count, next); - csv.entry(next); + Path rootPath = fs.qualify(new Path("/")); + try (AuditSpan span = fs.createSpan("DumpS3GuardDynamoTable", + rootPath.toString(), null)) { + Listing listing = fs.getListing(); + S3ListRequest request = listing.createListObjectsRequest("", null, span); + count = 0; + RemoteIterator st = + listing.createFileStatusListingIterator(rootPath, request, + ACCEPT_ALL, + new Listing.AcceptAllButSelfAndS3nDirs(rootPath), + span); + while (st.hasNext()) { + count++; + S3AFileStatus next = st.next(); + LOG.debug("[{}] {}", count, next); + csv.entry(next); + } + LOG.info("entry count: {}", count); } - LOG.info("entry count: {}", count); return count; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index b963e7e2532e5..e4542eb1bcfa6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -63,6 +63,8 @@ import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; import com.amazonaws.services.dynamodbv2.model.TableDescription; import com.amazonaws.services.dynamodbv2.model.WriteRequest; + +import org.apache.hadoop.fs.s3a.impl.InternalConstants; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @@ -593,7 +595,7 @@ private void innerDelete(final Path 
path, } // the policy on whether repeating delete operations is based // on that of S3A itself - boolean idempotent = S3AFileSystem.DELETE_CONSIDERED_IDEMPOTENT; + boolean idempotent = InternalConstants.DELETE_CONSIDERED_IDEMPOTENT; if (tombstone) { Preconditions.checkArgument(ttlTimeProvider != null, "ttlTimeProvider " + "must not be null"); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index 2acae76875a22..ef4756435533a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -30,7 +30,6 @@ import java.util.Date; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -55,11 +54,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.s3a.MultipartUtils; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AUtils; +import org.apache.hadoop.fs.s3a.WriteOperationHelper; import org.apache.hadoop.fs.s3a.auth.RolePolicies; import org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens; import org.apache.hadoop.fs.s3a.commit.CommitConstants; @@ -69,6 +68,8 @@ import org.apache.hadoop.fs.s3a.select.SelectTool; import org.apache.hadoop.fs.s3a.tools.MarkerTool; import org.apache.hadoop.fs.shell.CommandFormat; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ExitCodeProvider; @@ -84,6 +85,9 @@ import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterConstants.FILESYSTEM_TEMP_PATH; import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStoreTableManager.SSE_DEFAULT_MASTER_KEY; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.MULTIPART_UPLOAD_ABORTED; import static org.apache.hadoop.service.launcher.LauncherExitCodes.*; /** @@ -531,16 +535,13 @@ protected void dumpFileSystemStatistics(PrintStream stream) { if (fs == null) { return; } - println(stream, "%nStorage Statistics for %s%n", fs.getUri()); - StorageStatistics st = fs.getStorageStatistics(); - Iterator it - = st.getLongStatistics(); - while (it.hasNext()) { - StorageStatistics.LongStatistic next = it.next(); - long value = next.getValue(); - if (value != 0) { - println(stream, "%s\t%s", next.getName(), value); - } + println(stream, "%nIO Statistics for %s%n", fs.getUri()); + final IOStatistics iostats = retrieveIOStatistics(fs); + if (iostats != null) { + println(stream, ioStatisticsToPrettyString(iostats)); + + } else { + println(stream, "FileSystem does not provide IOStatistics"); } println(stream, ""); } @@ -890,15 +891,18 @@ public int run(String[] args, PrintStream out) throws Exception { final CommandFormat commandFormat = getCommandFormat(); + final boolean verbose = 
commandFormat.getOpt(VERBOSE); final ImportOperation importer = new ImportOperation( getFilesystem(), getStore(), status, commandFormat.getOpt(AUTH_FLAG), - commandFormat.getOpt(VERBOSE)); + verbose); long items = importer.execute(); println(out, "Inserted %d items into Metadata Store", items); - + if (verbose) { + dumpFileSystemStatistics(out); + } return SUCCESS; } @@ -1584,9 +1588,12 @@ public int run(String[] args, PrintStream out) throw invalidArgs("No options specified"); } processArgs(paths, out); + println(out, "Listing uploads under path \"%s\"", prefix); promptBeforeAbort(out); processUploads(out); - + if (verbose) { + dumpFileSystemStatistics(out); + } out.flush(); return SUCCESS; } @@ -1605,8 +1612,15 @@ private void promptBeforeAbort(PrintStream out) throws IOException { } private void processUploads(PrintStream out) throws IOException { - MultipartUtils.UploadIterator uploads; - uploads = getFilesystem().listUploads(prefix); + final S3AFileSystem fs = getFilesystem(); + MultipartUtils.UploadIterator uploads = fs.listUploads(prefix); + // create a span so that the write operation helper + // is within one + AuditSpan span = + fs.createSpan(MULTIPART_UPLOAD_ABORTED, + prefix, null); + final WriteOperationHelper writeOperationHelper + = fs.getWriteOperationHelper(); int count = 0; while (uploads.hasNext()) { @@ -1620,18 +1634,20 @@ private void processUploads(PrintStream out) throws IOException { upload.getKey(), upload.getUploadId()); } if (mode == Mode.ABORT) { - getFilesystem().getWriteOperationHelper() + writeOperationHelper .abortMultipartUpload(upload.getKey(), upload.getUploadId(), true, LOG_EVENT); } } + span.deactivate(); if (mode != Mode.EXPECT || verbose) { println(out, "%s %d uploads %s.", TOTAL, count, mode == Mode.ABORT ? "deleted" : "found"); } if (mode == Mode.EXPECT) { if (count != expectedCount) { - throw badState("Expected %d uploads, found %d", expectedCount, count); + throw badState("Expected upload count under %s: %d, found %d", + prefix, expectedCount, count); } } } @@ -1643,6 +1659,9 @@ private void processUploads(PrintStream out) throws IOException { * @return true iff u was created at least age milliseconds ago. 
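+   * (An age of zero matches every upload.)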
*/ private boolean olderThan(MultipartUpload u, long msec) { + if (msec == 0) { + return true; + } Date ageDate = new Date(System.currentTimeMillis() - msec); return ageDate.compareTo(u.getInitiated()) >= 0; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java index 20dc00fbc06a1..150043aea9f1a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Locale; -import java.util.Optional; import com.amazonaws.services.s3.model.CSVInput; import com.amazonaws.services.s3.model.CSVOutput; @@ -28,7 +27,6 @@ import com.amazonaws.services.s3.model.InputSerialization; import com.amazonaws.services.s3.model.OutputSerialization; import com.amazonaws.services.s3.model.QuoteFields; -import com.amazonaws.services.s3.model.SSECustomerKey; import com.amazonaws.services.s3.model.SelectObjectContentRequest; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; @@ -55,6 +53,7 @@ * This class is intended to be instantiated by the owning S3AFileSystem * instance to handle the construction of requests: IO is still done exclusively * in the filesystem. + * */ public class SelectBinding { @@ -70,12 +69,12 @@ public class SelectBinding { /** * Constructor. - * @param operations owning FS. + * @param operations callback to owner FS, with associated span. */ public SelectBinding(final WriteOperationHelper operations) { this.operations = checkNotNull(operations); Configuration conf = getConf(); - this.enabled = conf.getBoolean(FS_S3A_SELECT_ENABLED, true); + this.enabled = isSelectEnabled(conf); this.errorsIncludeSql = conf.getBoolean(SELECT_ERRORS_INCLUDE_SQL, false); } @@ -91,12 +90,20 @@ public boolean isEnabled() { return enabled; } + /** + * Static probe for select being enabled. + * @param conf configuration + * @return true iff select is enabled. + */ + public static boolean isSelectEnabled(Configuration conf) { + return conf.getBoolean(FS_S3A_SELECT_ENABLED, true); + } + /** * Build and execute a select request. * @param readContext the read context, which includes the source path. * @param expression the SQL expression. * @param builderOptions query options - * @param sseKey optional SSE customer key * @param objectAttributes object attributes from a HEAD request * @return an FSDataInputStream whose wrapped stream is a SelectInputStream * @throws IllegalArgumentException argument failure @@ -108,7 +115,6 @@ public FSDataInputStream select( final S3AReadOpContext readContext, final String expression, final Configuration builderOptions, - final Optional sseKey, final S3ObjectAttributes objectAttributes) throws IOException { return new FSDataInputStream( @@ -118,8 +124,8 @@ public FSDataInputStream select( buildSelectRequest( readContext.getPath(), expression, - builderOptions, - sseKey))); + builderOptions + ))); } /** @@ -127,7 +133,6 @@ public FSDataInputStream select( * @param path source path. * @param expression the SQL expression. 
* @param builderOptions config to extract other query options from - * @param sseKey optional SSE customer key * @return the request to serve * @throws IllegalArgumentException argument failure * @throws IOException problem building/validating the request @@ -135,16 +140,13 @@ public FSDataInputStream select( public SelectObjectContentRequest buildSelectRequest( final Path path, final String expression, - final Configuration builderOptions, - final Optional sseKey) + final Configuration builderOptions) throws IOException { Preconditions.checkState(isEnabled(), "S3 Select is not enabled for %s", path); SelectObjectContentRequest request = operations.newSelectRequest(path); buildRequest(request, expression, builderOptions); - // optionally set an SSE key in the input - sseKey.ifPresent(request::withSSECustomerKey); return request; } @@ -428,4 +430,5 @@ static String expandBackslashChars(String src) { .replace("\\\\", "\\"); } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/ForwardingIOStatisticsStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/ForwardingIOStatisticsStore.java new file mode 100644 index 0000000000000..612460835403e --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/ForwardingIOStatisticsStore.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.MeanStatistic; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +/** + * This may seem odd having an IOStatisticsStore which does nothing + * but forward to a wrapped store, but it's designed to + * assist in subclassing of selective methods, such + * as those to increment counters, get durations etc. 
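+ *
+ * <p>A sketch of the intended use (illustrative only):</p>
+ * <pre>{@code
+ * class CountingStore extends ForwardingIOStatisticsStore {
+ *   CountingStore(IOStatisticsStore inner) {
+ *     super(inner);
+ *   }
+ *   public long incrementCounter(String key, long value) {
+ *     // intercept the update, then forward to the wrapped store
+ *     return super.incrementCounter(key, value);
+ *   }
+ * }
+ * }</pre>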
+ */ +public class ForwardingIOStatisticsStore implements IOStatisticsStore { + + private final IOStatisticsStore innerStatistics; + + public ForwardingIOStatisticsStore( + final IOStatisticsStore innerStatistics) { + this.innerStatistics = innerStatistics; + } + + protected IOStatisticsStore getInnerStatistics() { + return innerStatistics; + } + + @Override + public Map counters() { + return getInnerStatistics().counters(); + } + + @Override + public Map gauges() { + return getInnerStatistics().gauges(); + } + + @Override + public Map minimums() { + return getInnerStatistics().minimums(); + } + + @Override + public Map maximums() { + return getInnerStatistics().maximums(); + } + + @Override + public Map meanStatistics() { + return getInnerStatistics().meanStatistics(); + } + + @Override + public boolean aggregate(@Nullable final IOStatistics statistics) { + return getInnerStatistics().aggregate(statistics); + } + + @Override + public long incrementCounter(final String key, final long value) { + return getInnerStatistics().incrementCounter(key, value); + } + + @Override + public void setCounter(final String key, final long value) { + getInnerStatistics().setCounter(key, value); + } + + @Override + public void setGauge(final String key, final long value) { + getInnerStatistics().setGauge(key, value); + } + + @Override + public long incrementGauge(final String key, final long value) { + return getInnerStatistics().incrementGauge(key, value); + } + + @Override + public void setMaximum(final String key, final long value) { + getInnerStatistics().setMaximum(key, value); + } + + @Override + public long incrementMaximum(final String key, final long value) { + return getInnerStatistics().incrementMaximum(key, value); + } + + @Override + public void setMinimum(final String key, final long value) { + getInnerStatistics().setMinimum(key, value); + + } + + @Override + public long incrementMinimum(final String key, final long value) { + return getInnerStatistics().incrementMinimum(key, value); + + } + + @Override + public void addMinimumSample(final String key, final long value) { + getInnerStatistics().addMinimumSample(key, value); + + } + + @Override + public void addMaximumSample(final String key, final long value) { + getInnerStatistics().addMaximumSample(key, value); + } + + @Override + public void setMeanStatistic(final String key, final MeanStatistic value) { + getInnerStatistics().setMeanStatistic(key, value); + + } + + @Override + public void addMeanStatisticSample(final String key, final long value) { + getInnerStatistics().addMeanStatisticSample(key, value); + + } + + @Override + public void reset() { + getInnerStatistics().reset(); + } + + @Override + public AtomicLong getCounterReference(final String key) { + return getInnerStatistics().getCounterReference(key); + } + + @Override + public AtomicLong getMaximumReference(final String key) { + return getInnerStatistics().getMaximumReference(key); + } + + @Override + public AtomicLong getMinimumReference(final String key) { + return getInnerStatistics().getMinimumReference(key); + } + + @Override + public AtomicLong getGaugeReference(final String key) { + return getInnerStatistics().getGaugeReference(key); + } + + @Override + public MeanStatistic getMeanStatistic(final String key) { + return getInnerStatistics().getMeanStatistic(key); + } + + @Override + public void addTimedOperation(final String prefix, + final long durationMillis) { + getInnerStatistics().addTimedOperation(prefix, durationMillis); + + } + + @Override + public void 
addTimedOperation(final String prefix, + final Duration duration) { + getInnerStatistics().addTimedOperation(prefix, duration); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java index 6d386f250e6cf..a4d07c6c5f125 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java @@ -36,6 +36,7 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +58,6 @@ import org.apache.hadoop.fs.s3a.impl.StoreContext; import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool; import org.apache.hadoop.fs.shell.CommandFormat; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.ExitUtil; @@ -363,7 +363,6 @@ ScanResult execute(final ScanArgs scanArgs) // extract the callbacks needed for the rest of the work storeContext = fs.createStoreContext(); - operations = fs.createMarkerToolOperations(); // filesystem policy. // if the -nonauth option is set, this is used to filter // out surplus markers from the results. @@ -417,13 +416,15 @@ ScanResult execute(final ScanArgs scanArgs) minMarkerCount = maxMarkerCount; maxMarkerCount = m; } - ScanResult result = scan(target, + // extract the callbacks needed for the rest of the work + operations = fs.createMarkerToolOperations( + target.toString()); + return scan(target, scanArgs.isDoPurge(), minMarkerCount, maxMarkerCount, scanArgs.getLimit(), filterPolicy); - return result; } /** diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing.md new file mode 100644 index 0000000000000..15a6470c5504c --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing.md @@ -0,0 +1,389 @@ + + +# Object Store Auditing + + +The S3A connector provides an extension point for auditing requests to S3. +The auditing can take place at the entry point to every FS operation, +and inside the AWS S3 SDK, immediately before the request is executed. + +The full architecture is covered in [Auditing Architecture](auditing_architecture.html); +this document covers its use. + +## Auditing workflow + +1. An _Auditor Service_ can be instantiated for each S3A FileSystem instance, + created during FS initialization and closed when the FS instance is closed. +1. The S3A FS will request from the Auditor Service an _Audit Span_ for each Hadoop FileSystem API call. +1. The audit span will have callbacks invoked during each of the S3 operations + invoked during the execution of the API call, *from within the AWS SDK* +1. This allows the Auditor Service to log requests made and associate with users and operations. +1. And/or reject operations. +1. The bundled "Logging Auditor" logs operations and attaches information about calls to the HTTP Referrer header. +1. So aiding debugging of issues related to performance, bucket load, S3 costs...etc. 
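+To make the workflow above concrete, here is a sketch of how a span is
+created, used and closed. It is modelled on the connector's internal
+callers; the class, method and operation names are purely illustrative,
+and `createSpan()` is an S3A-internal entry point, not public
+`FileSystem` API.
+
+```java
+import java.io.IOException;
+
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.store.audit.AuditSpan;
+
+public class SpanSketch {
+
+  static void listUploadsInSpan(S3AFileSystem fs, String prefix)
+      throws IOException {
+    // the span is activated on the creating thread...
+    try (AuditSpan span = fs.createSpan("multipart_upload_list", prefix, null)) {
+      // ...so S3 requests issued here are attributed to it by the auditor,
+      // for example in the HTTP referrer header described below.
+      fs.listUploads(prefix);
+    }
+    // closing the span deactivates it on this thread.
+  }
+}
+```
+
+Work handed off to other threads needs the span passed in explicitly;
+helpers such as `callableWithinAuditSpan()` exist for this.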
+ +Thus: an Auditor Service can be plugged in to provide (best-effort) auditing as well +as hinted allow/deny security. + +* Why best effort: coverage is not complete. See limitations below. +* Why "hinted" security? Any custom code running in the JVM could retrieve the AWS + credential chain and so bypass this auditing mechanism. + +## Limitations + +This is not a means of controlling access to S3 resources. It is a best-effort +attempt at supporting logging of FileSystem operations API calls, and, in +particular, correlating S3 multiple object requests with a single FS API call, +ideally even identifying the process/job generating load. + +* Low-level code using public S3A methods intended only for internal use may not + create spans. +* Code which asks for the AWS S3 client may bypass span creation. +* Application code can also create a new S3 client (reusing any existing + credentials) + and so have unaudited access to S3. +* There's no tie-up with OpenTelemetry. +* Uploads and copy operations through the TransferManager do not pick up an + active span because work is executed in threads which the S3A code cannot + update. +* There's a limit to how long an http referer header can be; operations on long + paths may be incompletely logged. + +## Using Auditing + +The Logging Auditor is enabled by default; it annotates the S3 logs. + +### Auditor Options + +| Option | Meaning | Default Value | +|--------|---------|---------------| +| `fs.s3a.audit.service.classname` | Auditor classname | `org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor` | +| `fs.s3a.audit.request.handlers` | List of extra subclasses of AWS SDK RequestHandler2 to include in handler chain | `""` | +| `fs.s3a.audit.referrer.enabled` | Logging auditor to publish the audit information in the HTTP Referrer header | `true` | +| `fs.s3a.audit.referrer.filter` | List of audit fields to filter | `""` | +| `fs.s3a.audit.reject.out.of.span.operations` | Auditor to reject operations "outside of a span" | `false` | + + +### Disabling Auditing with the No-op Auditor + +The No-op auditor does not perform any logging of audit events. + +```xml + + fs.s3a.audit.service.classname + org.apache.hadoop.fs.s3a.audit.impl.NoopAuditor + +``` + +### Auditing with the Logging Auditor + +The "Logging Auditor" is the default auditor. +It provides two forms of logging + +1. Logging of operations in the client via Log4J. +1. Dynamic generation of the HTTP Referrer header for S3 requests. + +The Logging Auditor is enabled by providing its classname in the option +`fs.s3a.audit.service.classname`. + +```xml + + fs.s3a.audit.service.classname + org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor + +``` + + +To print auditing events in the local client logs, set the associated Log4J log +to log at debug: + +``` +# Auditing +log4j.logger.org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor=DEBUG +``` + + +### Integration with S3 Server Access Logging + +An AWS S3 bucket can be configured to store logs of all HTTP requests made of a bucket +into a different S3 bucket, +[S3 Server Access Logging](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) +In the logging auditor the HTTP `referer` field of every AWS S3 request is built +up into a URL which provides context and span information. As this field is +saved in the S3 logs, if S3 bucket logging is enabled, the logs will be able to +correlate access by S3 clients to the actual operations taking place. + +Note: this logging is described as "Best Effort". 
There's no guarantee as to +when logs arrive. + +### Rejecting out-of-span operations + +The logging auditor can be configured to raise an exception whenever +a request is made to S3 outside an audited span -that is: the thread +interacting with S3 through the `S3AFileSystem` instance which created +the auditor does not have any span activated. + +This is primarily for development, as it can be used to guarantee +spans are being entered through the public API calls. + +```xml + + fs.s3a.audit.reject.out.of.span.operations + true + +``` + +This rejection process is disabled for some AWS S3 Request classes, +which are created within the AWS SDK as part of larger operations +and for which spans cannot be attached. + +| AWS Request Always allowed | Reason | +|----------------------------|--------| +| `GetBucketLocationRequest` | Used in AWS SDK to determine S3 endpoint | +| `CopyPartRequest` | Used in AWS SDK during copy operations | +| `CompleteMultipartUploadRequest` | Used in AWS SDK to complete copy operations | + +The request to initiate a copy/multipart upload is always audited, +therefore the auditing process does have coverage of rename and multipart +IO. However, the AWS S3 logs will not include full trace information +in the referrer header of the associated copy/complete calls. + + +## Auditing and the HTTP Referrer header + +The HTTP referrer header is attached by the logging auditor. +If the S3 Bucket is configured to log requests to another bucket, then these logs +entries will include the audit information _as the referrer_. + +This can be parsed (consult AWS documentation for a regular expression) +and the http referrer header extracted. + +``` +https://audit.example.org/hadoop/1/op_rename/3c0d9b7e-2a63-43d9-a220-3c574d768ef3-3/ + ?op=op_rename + &p1=s3a://alice-london/path1 + &pr=alice + &p2=s3a://alice-london/path2 + &ps=235865a0-d399-4696-9978-64568db1b51c + &id=3c0d9b7e-2a63-43d9-a220-3c574d768ef3-3 + &t0=12 + &fs=af5943a9-b6f6-4eec-9c58-008982fc492a + &t1=12 + &ts=1617116985923 +``` + +Here are the fields which may be found in a request. +If any of the field values were `null`, the field is omitted. + +| Name | Meaning | Example | +|------|---------|---------| +| `cm` | Command | `S3GuardTool$BucketInfo` | +| `fs` | FileSystem ID | `af5943a9-b6f6-4eec-9c58-008982fc492a` | +| `id` | Span ID | `3c0d9b7e-2a63-43d9-a220-3c574d768ef3-3` | +| `ji` | Job ID | `(Generated by query engine)` | +| `op` | Filesystem API call | `op_rename` | +| `p1` | Path 1 of operation | `s3a://alice-london/path1` | +| `p2` | Path 2 of operation | `s3a://alice-london/path2` | +| `pr` | Principal | `alice` | +| `ps` | Unique process UUID | `235865a0-d399-4696-9978-64568db1b51c` | +| `t0` | Thread 0: thread span was created in | `100` | +| `t1` | Thread 1: thread this operation was executed in | `200` | +| `ts` | Timestamp (UTC epoch millis) | `1617116985923` | + + +Thread IDs are from the current thread in the JVM. + +```java +Long.toString(Thread.currentThread().getId()) +``` + +When `t0` and `t1` are different it means that the span +has been handed off to another thread for work on +behalf of the original operation. +This can be correlated with log entries on the client +to isolate work to specific threads. + +### Limitations of the HTTP Referrer header + +There is a size limit on the length of the header; +operations on long paths may exceed it. +In such situations the audit log is incomplete. 
+ +This is why the span ID is always passed in as part of the URL, +rather than just an HTTP query parameter: even if +the header is chopped, the span ID will always be present. + +## Privacy Implications of HTTP Referrer auditing + +When the S3A client makes requests of an S3 bucket, the auditor +adds span information to the header, which is then +stored in the logs + +If the S3 bucket is owned by the same organization as the client, +this span information is internal to the organization. + +If the S3 bucket is owned/managed by a different entity, +then the span information is visible in any S3 bucket logs +collected by that entity. This includes the principal name +and the command executed if the application is launched via the `Tools` or +service launcher APIs. + +Sharing this information can be disabled by either filtering specific +headers, or by explicitly disabling referrer header generation entirely. + +Note: even when the HTTP Referrer is disabled by or the principal filtered, +AWS S3 logs include ARN of the user or IAM role making the request. + +### Filtering Referrer headers + +Specific fields can be filtered from the referrer header, and so are not +included in the S3A logs. + +```xml + + fs.s3a.audit.referrer.filter + pr, cm + Strip out principal and command from referrer headers + +``` + +### Disabling Referrer headers + +The logging auditor can be configured to not add the referrer header +by setting the option `fs.s3a.audit.referrer.enabled` to `false`, +either globally or for specific buckets: + +```xml + + + fs.s3a.audit.referrer.enabled + false + Disable referrer for all buckets + + + + fs.s3a.bucket.landsat-pds.audit.referrer.enabled + false + Do not add the referrer header to landsat operations + +``` + +## Collecting AWS S3 Logs for Analysis + +The S3 Bucket(s) must be set up for +[Server Access Logging](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html). + +This will tell AWS S3 to collect access logs of all HTTP requests +and store them in a different bucket in the same region. +The logs arrive as files containing a few seconds worth +of log data, stored under the configured path. + +### Enabling logging: Source bucket + +1. Create a separate bucket for logs in the same region, if you do not already have one. +1. In the S3 console, locate the bucket you wish to act as a source for logs, + and go to the "properties". +1. Scroll down to "Server access logging" +1. Select "edit" and then enable logging, entering a path in a nearby bucket for logs. + (Tip: for ease of logging multiple buckets to the same log bucket, use a prefix like + `logs/$BUCKET/log-` to isolate different bucket's logs. + For example, the path log data from `dev data london` could be + `s3://london-log-bucket/logs/dev-data-lon/log-` +1. Save this. + +There's a lag of about an hour between S3 requests being made and the logs +appearing; don't worry during setup if things do not appear to be working. +Enable the log, work with the bucket through the "hadoop fs" command line, wait +an hour, then go and look in the log bucket for the entries. +The log filename includes the time at which these logs +began + +### Keeping costs down by deleting old logs. + +As logs are stored in an S3 bucket, they too run up charges. +Keep costs down by deleting logs after a period of time, and/or +set up a workflow to load and coalesce log entries into a compressed +format and larger files. + +It is straightforward to set up a rule to automatically delete old log files. + +1. 
In the S3 console, bring up the bucket which is the destination for the logs, + e.g. `london-log-bucket`. +1. Go to the "Management" tab. +1. Add a lifecycle rule (alongside the "abort pending uploads" rule you should already have). +1. Add rule name "Delete old log files". +1. Select "Limit the scope". +1. Add the prefix `logs/` to have it delete all logs of all buckets. + Important: you _must not_ have any leading "/", such as `/logs/` -there will be no + match and the rule will not work. +1. In "Lifecycle rule actions", select "Expire current versions" + This will delete log entries. +1. In "Expire current versions of objects", set the number of days to keep + log entries. +1. Finish by pressing the "Create Rule" button + +Keep an eye on the bucket to make sure the deletion is working; it's easy to +make an error in the prefix, and as logs will be created without limit, +costs will ramp up. + +## Parsing AWS S3 Logs to extract the referrer header + +The [AWS S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html) +covers the log format and includes a hive external table declaration to work with it. + +The Java pattern regular expression used in the `hadoop-aws` test suites to +extract headers is defined as: + +``` +(?[^ ]*) (?[^ ]*) (?\[(.*?)\]) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?(-|"[^"]*")) (?(-|[0-9]*)) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?(-|"[^"]*")) (?(-|"[^"]*")) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*)*$ +``` + +The class `org.apache.hadoop.fs.s3a.audit.S3LogParser` provides this pattern +as well as constants for each group. It is declared as `Public/Unstable`. + +## Debugging + +The `org.apache.hadoop.fs.s3a.audit` log context contains logs for the different +components implementing auditing. + +Logging of requests audited with the `LoggingAuditService` can be enabled by +setting that log to debug. + +``` +# Log before a request is made to S3 +log4j.logger.org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor=DEBUG +``` + +This adds one log line per request -and does provide some insight into +communications between the S3A client and AWS S3. + +For low-level debugging of the Auditing system, such as when when spans are +entered and exited, set the log to `TRACE`: + +``` +# log request creation, span lifecycle and other low-level details +log4j.logger.org.apache.hadoop.fs.s3a.audit=TRACE +``` + +This is very noisy and not recommended in normal operation. + +## Integration with S3A Committers + +Work submitted through the S3A committer will have the job (query) ID associated +with S3 operations taking place against all S3A filesystems in that thread. + +For this to be useful, the work performed in a task MUST be in the same thread +which called `jobSetup()` or `taskSetup()` on the committer. + diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing_architecture.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing_architecture.md new file mode 100644 index 0000000000000..50e8fe79fc669 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/auditing_architecture.md @@ -0,0 +1,323 @@ + + +# Object Store Auditing: Architecture + +This the architecture document of the S3A [Auditing](auditing.html) component. + + +The S3A auditing subsystem is defined in the package `org.apache.hadoop.fs.s3a.audit`. + +This package is declared `LimitedPrivate`; some classes inside are explicitly +declared `@Public` (e.g `AuditConstants`) while others `@Private`. 
If declared +`@Private`, external auditing modules MUST NOT use them. + +```java +@InterfaceAudience.LimitedPrivate("auditing extensions") +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.audit; +``` + +The auditing implementation classes are all in the package +package `org.apache.hadoop.fs.s3a.audit.impl`. +These MUST NOT be subclassed or invoked directly by external code. + +Audit classes/interfaces which are intended to be used across object store +clients and manipulated from other parts of hadoop are in `hadoop-common` JAR in +the package `org.apache.hadoop.fs.store`. + +### Interface `org.apache.hadoop.fs.store.audit.AuditSpan` + +An AuditSpan audits a single Hadoop FileSystem API operation such as +`open(Path)`, `rename(Path, Path)` or `listFiles(Path, Boolean)`. + +```java +public interface AuditSpan extends Closeable { + + String getSpanId(); + + String getOperationName(); + + long getTimestamp(); + + AuditSpan activate(); + + void deactivate(); + + default void close() { + deactivate(); + } + + boolean isValidSpan(); + + void set(String key, String value); +} +``` + +Audit Spans are intended for use _within_ FileSystem clients; that is +not visible to applications invoking them. + +1. One `AuditSpan` is created per Hadoop FS API call. +1. Each span has the name of the operation and optionally source and destination paths. +1. A span may be `activate()`d or `deactivate()`d. Between these two operations a span is _active_. +1. Activation is on a per-thread basis. A single span can be active in multiple threads + simultaneously; other spans may be active in other threads. +1. A single filesystem can have only one active span per thread, but different filesystem + instances MAY have different active spans. +1. All store operations performed on a thread are considered _within_ + the active span. +1. Spans do not explicitly terminate; they just stop being invoked; eventually + Garbage Collection should dispose of them. +1. Every `AuditSpan` has an ID, which *must* be unique. A UUID and a counter is + the base implementation. +1. The `AuditSpan` class does extend `Closeable`; calling `close()` simply deactivates + the span _for that thread_. +1. All FS API calls which return objects which go on to perform FS operations + (`create()`, `open()`, incremental list calls which return `RemoteIterator` etc) pass + the span into the objects which they return. +1. As a result, any store IO performed by the returned streams and iterators MUST activate + the span before that IO and deactivate it afterwards. +1. There is also the "Unbonded Span" which is the effective span of an FS when there + is no active span. +1. Calling a store within the unbonded span is generally considered an bug. In the S3A codebase + this should never happen outside copy/rename operations, and will be logged at + warning level in the Logging Auditor. + +### interface `org.apache.hadoop.fs.store.audit.AuditSpanSource` + +This interface is implemented by sources of audit spans. + +```java +public interface AuditSpanSource { + + T createSpan(String operation, + @Nullable String path1, + @Nullable String path2) + throws IOException; +} +``` + +All S3 Auditors implement this interface, as does the `AuditManagerS3A`. +(Implementation note: so do `S3AFileSystem` and `WriteOperationHelper`) + +When a Hadoop FS API call is made of an `S3AFileSystem` instance, it +calls `startOperation` on its audit manager; this will relay it to +the auditor is bound to. 
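+
+As a hedged sketch of that relay from the caller's side (the wrapper class and
+method below are illustrative, and the source is assumed to be parameterized on
+`AuditSpan`; this is not the actual S3A implementation):
+
+```java
+// Illustrative only: a filesystem operation asks its span source for a span,
+// then performs the store IO inside it.
+import java.io.IOException;
+import org.apache.hadoop.fs.store.audit.AuditSpan;
+import org.apache.hadoop.fs.store.audit.AuditSpanSource;
+
+class SpanSourceSketch {
+  static void auditedRename(AuditSpanSource<AuditSpan> spanSource,
+      String source, String dest) throws IOException {
+    // createSpan() relays to whichever auditor the source is bound to
+    try (AuditSpan span = spanSource.createSpan("op_rename", source, dest)) {
+      span.activate();   // make it the active span on this thread; the audit
+                         // manager normally does this on the caller's behalf
+      // ... S3 requests issued here are attributed to the span ...
+    }                    // close() merely deactivates the span on this thread
+  }
+}
+```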
+ +The auditor then creates and returns a span for the specific operation. +The AuditManagerS3A will automatically activate the span returned by the auditor +(i.e. assign it the thread local variable tracking the active span in each thread) + +### Class `org.apache.hadoop.fs.audit.CommonAuditContext` + +This is a class in `hadoop-common` which provides a context to auditing operations +across all instrumented filesystems. + +It's Global Context values are a map of string keys and values, which are +constant across all threads. This is where global values such as a process +UUID and the class executed by `ToolRunner` are noted. + +The `CommonAuditContext.currentAuditContext()` call returns a thread local +`CommonAuditContext` which is a thread-local map of keys to string values. +It also supports a map of _evaluated entries_. +This is a map of type `Map<String, Supplier<String>>`. +supplier methods/lambda expressions set here are dynamically evaluated when +auditors retrieve the values. +Spans may be used on different thread from that which they were created. +Spans MUST always use the values from the `currentAuditContext()` in the creation +thread. + + +### class `NoopAuditor` + +This auditor creates spans which perform no auditing. +It is very efficient and reliable. + +### class `LoggingAuditor` + +The logging auditor logs operations to the console at DEBUG level (to keep the noise down), +and attaches the operation details in the HTTP "referer" header. + +It can be configured to raise an exception whenever an S3 API call is made +from within the unbonded span. +This option primarily for development, as it is how we can verify that all +calls are audited/identify where this is not possible. + + +### class `ActiveAuditManager` interface `ActiveAuditManager` + +The class `ActiveAuditManager` provides all the support needed for +`S3AFileSystem` to support spans, including +* Loading and starting the auditor declared in a Hadoop configuration. +* Maintaining a per-thread record of the active audit span +* Switching spans on `AuditSpan.activate()` and reverting to the + unbonded span in `deactivate()` and `close()`. +* Providing binding classes to be passed into the AWS SDK so as to + invoke audit operations prior to requests being issued. This is essential to + guarantee that all AWS S3 operations will be audited. + +It's a YARN composite service which follows the standard lifecycle. +The actual auditor is instantiated initialized and started in its service +start phase; closed when the Audit Manager is stopped. + +```java +public interface AuditManagerS3A extends Service, + AuditSpanSource, + AWSAuditEventCallbacks, + ActiveThreadSpanSource { + + /** + * Get the auditor; valid once initialized. + * @return the auditor. + */ + OperationAuditor getAuditor(); + + /** + * Create the request handler(s) for this audit service. + * The list returned is mutable; new handlers may be added. + * @return list of handlers for the SDK. + * @throws IOException failure. + */ + List createRequestHandlers() throws IOException; + + /** + * Return a transfer state change callback which + * fixes the active span context to be that in which + * the state change listener was created. + * This can be used to audit the creation of the multipart + * upload initiation request which the transfer manager + * makes when a file to be copied is split up. + * This must be invoked/used within the active span. + * @return a state change listener. 
+ */ + TransferStateChangeListener createStateChangeListener(); + + /** + * Check for permission to access a path. + * The path is fully qualified and the status is the + * status of the path. + * This is called from the {@code FileSystem.access()} command + * and is a soft permission check used by Hive. + * @param path path to check + * @param status status of the path. + * @param mode access mode. + * @return true if access is allowed. + * @throws IOException failure + */ + boolean checkAccess(Path path, S3AFileStatus status, FsAction mode) + throws IOException; +} +``` + +## Using Audit Spans within the S3A Connector + +1. All public FS API calls must be marked as `@AuditEntryPoint` and initiate a span. +1. All interfaces which provided a subset of the store API to another class + (e.g. listing) MUST pick up the current span, store it, and activate/deactivate + the span when invoked. This ensures use across threads. +1. Methods/classes which operate across threads must store the audit span which + was active on their creation/invocation, and activate it in all threads which + interact with the FS. This should be automatic if callback interfaces + do this. +1. All S3 SDK request objects MUST be created in the request factory. Add new + methods if need be. + +## Implementing a custom `OperationAuditor` + + +_This extension point is `@Unstable`_ + +```java +@InterfaceAudience.LimitedPrivate("S3A auditing extensions") +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.audit; +``` + +A custom `OperationAuditor` auditor is a class which implements the interface +`org.apache.hadoop.fs.s3a.audit.OperationAuditor`. This SHOULD be done by +subclassing +`org.apache.hadoop.fs.s3a.audit.AbstractOperationAuditor`. + +It is a YARN service and follows the lifecycle: +configured in `serviceInit()`; start any worker threads/perform startup +operations in `serviceStart()` and shutdown in `serviceStop()`. + +In use, it will be instantiated in `S3AFileSystem.initialize()` +and shutdown when the FS instance is closed. + +It will be instantiated before the AWS S3 Client is built -it may provide a +request handler to be part of the handler chain of the S3 request pipeline. + +It will be closed in the `FileSystem.close()` operation, after the S3 Client is +itself closed. + + +### Design Decisions/Review questions + +### Why use https://audit.example.org/ as referrer host? + +IETF requires *.example.org to be unresolvable through DNS, so with a well configured DNS there's never any host to probe. + +It guarantees that there will never be real HTTP requests coming in from that host. + + +## And why `hadoop/1/` in the referrer path? + +Provenance and versioning. + + +### Why no explicit end to an AuditSpan? + +While most API calls have a bounded duration, e.g. `getFileStatus()`, +some calls have a very long lifespan (input and output streams). +List iterators are never formally terminated, they just "fall out of scope", +Thus, they'd never end. + +Having a uniform "Audit Spans are never explicitly terminated" design +means that it is consistent everywhere. + +### Can you activate an already active audit span? + +It's a no-op. + +It does mean that if you deactivate the span the first time, then the thread +reverts immediately to the unbonded span. + +### Why does `AuditSpan.deactivate()` switches to the unbound span, rather than the span which was active before + +Again, it gets complicated fast, especially when audit spans our shared across threads. 
+ +Because of the sharing you cannot store the previous span in a field within the AuditSpan itself. + +Instead you need to have a thread local stack per FileSystem instance of active audit spans. + +And you had better be confident that audit spans are correctly activated and deactivated, +with no span deactivated more than once -else the stack will become confused. + +Having a simple "In Span" or "Out of Span" model avoids this problem. +However, it does prevent the S3A FileSystem implementation methods from +calling other methods which create new spans. +Hence the annotation of all span entry points as `@AuditEntryPoint` and +a need for rigorous review of the invocations. +As with the need to make sure that we never call retry() around a method tagged `@Retry`, +making sure that an audit entry point doesn't invoke another audit entry point +is going to become another piece of maintenance overhead. + + + +### History + + +* 2021-02 Creation. [HADOOP-17511](https://issues.apache.org/jira/browse/HADOOP-17511) _Add an Audit plugin point for S3A auditing/context_. + diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index c68c57d95617f..aff7694c52aa7 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -44,6 +44,8 @@ full details. * [Working with IAM Assumed Roles](./assumed_roles.html) * [S3A Delegation Token Support](./delegation_tokens.html) * [S3A Delegation Token Architecture](delegation_token_architecture.html). +* [Auditing](./auditing.html). +* [Auditing Architecture](./auditing_architecture.html). * [Testing](./testing.html) ## Overview diff --git a/hadoop-tools/hadoop-aws/src/site/resources/tools/hadoop-aws/audit-architecture.png b/hadoop-tools/hadoop-aws/src/site/resources/tools/hadoop-aws/audit-architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..3d0adf61931cda905e601d70ef45f62945617250 GIT binary patch literal 55701 zcmc$`1yodB+dhscf+8RyA`K!Sozfj50-~fKFd`sGhjfe}B`O`#C`yNP4-!h3(%mRs z1I!FF^WTF$&*S^O-|t)BxBkCa%e7?AIeVX-_rCAzy6*Gx-dz=v^EBu2@bE}fZ!2lx z;SpHi;o*NF!UtEbcV3~#!+VaWswA)DVX{7T&gZf&?8nX{Jmo8=$d%~mX2Z`*Z03l0 z(GxfsofXyDzqY{Jel0)ac`0Ytr5)J>g`5%?`|g7 zIBZ&OqKsx5T@(y-Tq}O+xMt_?qWm!FKAhS4`I_l6#;(1Md36*p0?-URJTXW>$;lsh zhxbSccJe>o)hNDSf86?i`bf{h`dFDz|89Xn+1F7@T#H&+cL}T* zEc?>pa?L(BCR>T`cxJryIR);_URfmWmujaf|I7rzJy}hJz!xUlCE($2r^5Z_Re<63 zyzr4HzOaiMi07{&`Q-MPApQ6+eigs=-zeTOjc zcz%!JpijwAy@Hg|@+PbMG{Vd`U^bV1%VDCjXMiQSn=9mXUT3)?p;q_OUFGtsnesmy zn(re&ww3VI>9~qnkBFKbolJ~=#cJNZY; z3R2;#ToEXn@9_MMi5Owi=(@?2vIA*357o(O@&3ED~r_V*3G zlJX}r>`0CqM$=t_Y}kcfL2Ec`Xg(;h1OJw8EVAD zw;8mAe}!1#>-b_~Y0{j_`O|0j(E|})4r&iQ9?gtLB;2L(!G3PBSnD~ob5`<3xdr?& ztaRN}g9vv$B&`h5y;bao&Y96Iau4tEocy9y@hAJ+GQLIq=T+W#?SFwB8}VdsA#)tk zP1-mE;5#E(jr;STHmB+iR)^37Dw_kl1hm5q>}xsgokn?f*-)%Zg7L7jUQ9@2GTUoL z<{;Obb>meI+3_UI5t4qBWf4+7?%8)YQ1Aw4<4#*$nDx?r-DYixT0*#1#|XrGckyL# zpH)5o?%YcUY3yVu;nMx08f2cPOxh(sl;!F-3pAtASf*Lq!QrIu9!y$wd?DBe_Kmyr zcvr2Z(moN|wlMg;#vQ$DS;>P*`CJ8vmgET z{TZ}Atohxpf6lotY<4gq8P2Fk#K@VPp&SQQdkHltf#!x8VFRvVA9Dz6>h&Qd1&d3s=&cNN^D4(! 
z$;N5Bg$`9A3k+Y7V2q|A`mXB09bK7S2!s8|W;6A7NWl%+t=%)8Mh2d(Ws0&U5~`Ja zW%h7!{KOW`L@7awjRFH9NYwfE(?3TTMb=Gn7iXKQ4mW*0k2dD zhZ-O=Tz!KCveKq-AL3nEU$3J`n6DT7#{fJkWNUM*EV+}4ZnCk``>@FQkYMlYYienu z@q2~owVr9V*fC3nBddWx3>ww0&F>?@BKK=~QsS29vX4J9T+u+x$*quE$EQq7;mRTe zT^Q`sbPC(4jx|!e{mwMYnly2x%6jO|lNv$0&Edi_x4YBU7;cw!!r;kwog>-ZB0dbJ zGilcqVN8#9JkQi5bhl7Y@I`iD_xjN4Su=S?42MN^_Bvf0HkKXq`h_V)jhUwO1~A`Q zS8Gpr9(Ado=U6cRL_#7qLtZpzNi#t+4le4DlwV1)i#WSl+!zT}Y1j_so^e9KyW-WO zgPhFLquhj`Uro1XCkQY6$d9l3$P+8s32Bk|(^m9Sza_Vzi8?Nz1 zxvjFCMYc&lV^ZF{aWi(Qb0JsoK6LpfVNqidB!QRS0rr!oBHRznlf2x4v>+TYu!E#M zSsQ*?+<5SzR(Hs4dU7CmtA7r z*S0H}WNoy=%zqeG7uam8Lo}=T+>eob^SSm$UruJ?-C~R$uw!wuy6<-o0rD^GfAqhb z`s{!Dx)*$vcWA zLa;~9oL-DP+<2PFyXBN?np`#QykS>G!hiYBv(-VEW;$1z$FU602fbj|KMn-NbiN$4 zl*Y%?`{aTUCT6a+>W8jg6F1#^YP-imuVK``bcTeK67$Ve^zd%%J6)EmI=7REF1ACToE&bbl|tLl9DT2zfuPsavnv9fLR>`3wXpUz)8Crs;9=ce!Z5qG&h~MB1 zvB#t-@IWp;K^4~@KWGc5>Y_;4^*>Fx|6@Sc;z~i9gRaFmSbK|Xo>#_&qDPZz<%T^M z=jtpZ^Y!jRF*WJB>62_rTaAZa`Fe#<)S)Hywu17so~phj!LU@7*61RE4my&Y}lbwQ|%a(B;vk4ig`1VKDgY+ z`h^PxC&6$x2T@g0@2uY|^qC7NIk)mKnNG@~`0?suRD#L4!FkDNRtc#U>TGkxuw4(Z z<%W_HAk!Lb+lFyohp>8=m069_-Oj#gy(P6PjjmrP6DsUeM=NZyIbu{QZQS1>bH5NO zED<-`Htu??f>jl}a$~8%%XOv(t*C1c_ep1vQLUkSPXBs{oz-0iQ9B1t|^y}Xg1lc)aO zPe7hguWy9&!yVKitA4t$AE*2(+tRYS|M@?L`)JxnUX2*NLR?SthrN@BeZD9Q-A|*s zC1cBbs1f zE3FzI3L-OV3%M!^dqGj#WL$NlS%Pj&6nDmxFSX17e+w#VVLdJbH{ZRXGLefAdJ)|EdmO?BfgP5gDq7(Df# zT>4Ugt3Ca%+SfqsuMAQz=#$H4x8}0r8a4u~cGY>Pppmjv%BAd+?oEdJoT_i(DKkj) zpnWwT{qUq6Of+xF@Jg2BJ*)ru{aLaa_SwMxq@w3mk7V$+9J`L=PX;0s+D$l1Z=X1$ zb<+Kv2m~Iea_@Ipi~!?v_i772HEEn{-YUC%dYiPW+u>J5M{PcM(@k3}VqHA&GvM|` zl5WE14_vyWCja?8OK$|3<*qszGdW@Y|GG9Nd>7K*DC$_5L4xhrHcdp8)7N`*)rFR~ zm#rzz$X^8i0v|Su)haJqDKUkOYT}wy(k3=ZF8uh+;FxX3&c9yz;Sze;_bhlORQl^& zcm_F(T)N_iA41@Q+`0g%`R)z&fmHtrjStPzv82a*m zS;@Y*Qkcx?KMG~^F>CEpg9}UtS3@KNZoS6yC5?LD2QK{G#eeJ1=Pec{t@Uj1$nVCH zhh96-20#DqUlE_&hyQDX^4CcE__NsFoWJ0av;z+hbb4qt_8NT}um7=*j?UGH^KzL> zi;Gw5F87Nq64ROHlP^irZEvwjQ46j2?EIh&ydr|Sg!g%flatfD(^5v_V(a18pR137 z6Dhb$5dLW>wELVioydyEK8XTxvhO*(S?fjOP&PsY{wOd`Zs0 ze5de?-uRpvSOl)RWIL5uvmT2e`@Z97%EEvs5s&QwE8g?`C_bhnx<%qgpbC{yYo+Bt z2GV2~DdgFAK|QcDc#b!B36x?$T)EB!nI(Gorah&<#ndHSWrnHz(^vwq6}Mo|t(?JD zrT^8L9+zgm#v76RjT7faf_lW$Y8)=|i2SE7LZ@uM>f2k~O2-@p1#+h#EKHp(;*>Ou z;}JcYtx(X!C_es>MiF#tiH?h>i*om>*-3YL%n$)@>7`%dfgUTK^!R=<1!hkQ@~ z#KF&du@8#e)r2BT*Yo@Oj<>LWMaRW`beJ{1PHcT)ZSyh4U2SmiiH@G-+pg3wD~HAo z&>Jd2h~uh$azzf}alR*kWhOX~=1SqqyUStOY4weE6Cx@Rwt_R4U$jp)EcQx53mE5H z3uJyxQ^78Q00X5k&pM-YKIeB*`r85tl2>;x0uME@3w~c`dkh3pxwogJw!FJor<1lP zM@7>G95}zdm0lZydSaMeJk3CG`Fm`0(kar=tDmum2!~Y|sVx=`)j8h5+DYuecgCe& zBH9`~=C`r*`a5;+zkBz1pW?ZT6s1B^skH|nh9d<$eLLQM_(q|dxH?m!`Vm1`){}oz zoRd>%#g->Jlbf6eMq&wQ|Jd`$5(u? 
z+t3dY)&&}A94eDPjB_6tmKs*h`~gEmcqrTwUUfJbC?0hflSpQOoDjV*B)+G@|bJ`YPGi4448rU=az>q41nNS(d2VpiqEOZ z@Duy<9r->T7$)oJ~T{K?(_72bZ{b$FM1VnmEs-6VNVY@P8 z#z%T#TT?x(tRXLGcSbhSJ`GpZ@E?7=5KL49Q)+s*PUd!5wQzdOs9r|4F)M5CC;QTJ z*FsS*$J^3rQ5!XiVcYQ`*`YQ|F~se~ z*{NABgOBHz`X{P1ixVI;m9AaKlJ<=z;hi{aq!R?zej~=BZa=@3fH2Giy3rBCPE+)L zeWtO`BEdC^yhT?Z9xHkGs;oks1Bj}Q^RIEa<_F866$?W?^1;wdUYg#98PTuffOMHSiJ$WxIV#iAxTok~W8$fbmhXc%w+e1i z>1E<42F|!m;O!k0R%SM*P<`TL1RKhGt3h{_W8p@w<+oLLm^&)5E^b`@0`msiTSPu9&2&_?fecE=eIY}s=r*1lVenF{(Y5}V^AZ=w4&mZYg}YdX2^9g~B- zVU!`XFN%8%led0c;45CH{c4FLC-4=sZSPJwj9Kv%njmCO@eOUcmEp`NK&-@{*ti&; zOPrVCUksN+M`_yA?5y_#aB+2Hs}tD1T&~mFqJ3)W>ah!BDzDhuoz(2LMyRhWLXrF1 z%+8P21~T*M_`DC+vzA)~^WF-2ns<&Hls$o6Ya2nhwB-=qxtqkng6Byj9^YkJ@xkgN zr|dH(i>0uirAd?5hrV-)cn|0AR%@k;Z#6Q%xFmkHtJ^4l<&){5{kWJVKyg|IaEFZlDuSP4@NpTcT=E>;6tMamSQCFLvK=ARZ zlVl(lQNWr%5%&r|P6)AJlQKiTqFh=rPJ48dri8}$!JG3EZnk`F7U+202X-n!;(U*E zi-xq*Mk4$MQO(2?IX+=S2V|R&i3V?d>gyn6ebW77?%Q(SRJo78@LhoD@HGmn7s`5m zYiw^6+#d%95bOvX+@TnU54;qzUtUU!_+ncp2D7_^^+Jq7a|b^Qg}j(?&#^cU&SS!3 zE+MoF2@XOa5qN6+vfG1dgF9N)LoR_YGI6h&UfDXe{)M<;F|YKcBBSLw#uToEF<2}W z-TC;gb;IR32e!D*MX2jtfAz*83B9#7#l~=MzR}@NM)&k&`--sviuoHMxt8rd{z`_b zMaRi0w8yMn1QV~E4+!@NlF#pbE#SBb{VJcd)-d?EqQ3ZAH9f-S0sABGaLab890#H3 zAPUQoPK7U`T&(8un4uZ#tJK1U4&2B`^>ovDntq4mO)88vD_?1gNe3Abo3(4VbZ@kY z8)yXMmvXDjoH~una~8K!tvS}kcAiql(8HTqN__xy%lA-F75gCA?ksoFZJiPa&1lm= zW}yv$THWhP=S}CVvY!^#x7$1LAIaSv%71d(6F`A-=-VH4^YwC*S2~_GJgn*+Hz3X@ zkLcdeeXsP`x6`)JD5yr})GOq}A))@zgL9*sYIu11LMz0N#W{(DX9l!4;b4`%d`Z>* z0f|`gWBdM8*ZwBWK!{&zWF|iR9Pvgv#xO zZ?(~{eZ_%q({?>hPtWqTCa`tKu1&2?$m8I(g9v?#P9HH}haz21fKpZ4s&u-i)uln^ zM>}3?D0{|?B&a`F(`FR-WQcvH5v)F_|AeYO-H_WgIr+dCCif7kiI&2e!`&HA0Yoke z4qpD1jgVG;qM&s?Rk^HEc#&XVz|QQGeKPUhb&)H}U2dfZLDhHqm+AT<`R;X%mq6-v z`2hwBCe!DrNlg61&*E@zfb#&{GU+pdi%r@*vo!Q zShI8JkU`B((tY!Wd+gF&3!#m8cqB_&UZ=3Qt!C5>^;9wF7eFju!$+Za7cW(#dhShrUAV6jP6IyH;{0g6*t(LFifdhxxM}Hw89E;xHf zpW(o<9rG@M!^b1weHetFTU%k>`e%5TYEfa!Cw4tafh@SW&A=#^_?k8`k3%zUMnmhf6Ou;$7|O4U-Qpo3{iG1?vB+Dxjqxs~?JQi)Y&OF3!!JB2k6`{4L?!MK zl+aP0N&5NYY3OOxaA&K>T)=Eji?#6Mo3)>4D2=?HuDCy`H}~Vt-^k%#At>mCM|kI1 zdp~|WFZmsE;ei8o_+b$V!uLFG;$pG;(+?%xY||6=UShHUp9dI?P+JGxCLPs>z2ZIZ$Ej5)yYDYKgyb~7#zG6V z^8!bcCK==VPFhc8i*}q{yqM?P{VT%JNaqVh=VVYvq}|MRv+8Q&6zQ@u->20i;J(FT zdW(u#g`m`DtD(xoyN9eIb=X%U;=o>cMiK^GN>ux)ue6O!{ZEeD7EMr6EK8Oys%rPCRK+G?=8W z%4XzlvPdrnhY3eWE%0V@9ezZy%mCkXv>Jm>DTr`j^hl*EA4L9!(9QRmROH34bv{yv z>nhO*4;6eEPyYNMvlBSrP)7?gPa>`To^xAA#`+8%|@@0rOVppH?j{sYN`e_0oboUfOAIk5G=@cYd|;+KzrvgsI${v zHdr|{``M4tA!LPJea3$9p@xd6o>>&(9k_Wg-{~luV)Q#bdtJtS-0Cq$y>0pBPz`Od zSdkCimv^C;UfSkSjPi6*+1e?L&s_PGjxC$C>B^cNdc~~r*FrKfs#d7n>+jo)ep-LQ zW7>DcBC#Z7Ed0FZElnxE^w+P-w|vVv6WTT=YZiui_K5dn>(+{B`8K>>+Ol^a47t-u z?QT!)`hYM71Ga?r9&`gm`f*&}$Z4!%aZ3z73|#oA5+(-+w%N zfbIb7p-uO=H-#fop{Cjlh&i_g zA6?AlZl?N%fhw$3%a;SaCBgKB8g#3x3`ct8JeSy2sSh33jg~idFJ`U@k!y>dVc#|# zPH3M_7qTg1(wtYX1LFW1#oh!+^FxFQy(Wgy-oe;c?^VjR#s8*p#PQU3D=|H;Xk4l^Q zb}CW#Z)adAo+~m|?*X<(Rkhe;%H1A&&u-5D9?R+6V`=x90_m{U08g_;n4oYJ`3ssK z&wepE>SCg$QKuanVNHB)@$%iFmLb+ldhn9GMqTN`@wSV<04bUnzR>BMjGFK?!ONcT zxVE}y=Sam{mlE7j+b^69;}pVifgrF*t25<=#AiwAm=vivVo6{^3#^H?5A9UGWOrrf zL7?l?4f&X^cRfp5eYAeZnugM{sAq;pUU#~aC2O^_4@zO>+f%YNs>XLNc2iAKcT?S7 zT;WX^hJmGquJ{?lCR3~ze`DC@- zg!4>6y7%orSLb5e+q4%;Sdh=L~(Y*b?zHX<@8uH z>q?lU8syh)X1uHCeZU~7ozV5O~RONF0o5P>{U+*btpL%A^K%DAl`N% ztpmUw#Abic?OCb0DwaOc#Z%JeHJ9&5 zlW1Iif^v>|owc7By3M>%(1>ig__rt2%J0&gR9mpv$$8Fze}+c?9kyQ1?AkZDF%9rf zOp&*Qf-zw-*Pi@3TsFW_$-{H?jj+r=DA$fS`)lEN%gfi7M2`JT|1P>SChCXzAz)6F^f8X&>R1o|0Gho63+5ZLO)>tC$Rv`?qQd*4WlZ%Nu2_I5U zX$rxnqxkKKd6G+@czA}d!@|xl+&^U{{2NQpAosW8z}6ZyKLAWd%HGaE1a@TtD-d5x 
z2q%0W2o@}$SPX;h={Zfg0k=VlSYm`GipdJBxKyPDWMSm?L6mal4Uv*EVq~c_CVKV~ z;4ADoVMex;`Aqzz^A0-$@qtuyZ*{}_J93Hi(lxEQ4u5;cY4mG)9!G|>Jz z*8r+cL++U7=H!2#R{&6Gz+P0I7fqW7Z_oi90dEk~tgDtINVp1gVch5rJIYN7TuCc6 z0LY&KMc>ZN<5Lb1+Fp#41lKK>L|N}o-GUlHp#z61o%U4isTy}5Yp z$w9_Y|1-M;cQeROu?N-vRW;EbsM=l}SM}A~zpK`2|EutULF%L)IX1gGSN0<9xkcg> zITrlzb1r{1nk_$Of^!~=d-G$K-)|N$<>kHCgzNDrIcBjk%tHoO_gmFpb^oGil;S4H zDnNTKNo3CTT7K$-obw&Zkb(D&5P{x{zJhZh%4@F_p@Mzys3%fn7N zb|&Sr^H=_*=~ax@H88w?@2l27uX0AIp2l;!eOB&_{EwC+HYZ9n#r>AE2pf-pyK(8* z_FjIeGjregVMS@PLo1YK!q@Qd?3?k3AzcG>k9s?*jYKer02$j1s)X+9&pbuSKkofJx}Jx!~gdNnF-;w3E<)5iM8j4P!`J+ zU(NhA51*aZAK(d}THy!H{LgOGkv`b5p4t9u7yuJU*A`A7NAYP5bohYe-)s7B7MQOK z!V?QWb@4gdqmffwCL6RSz9s^vkfrrY3ELiyZi;fbX_C^EIi=jQ%|5xA@Qa}+fm-vhDY-Iw{h@Iy?{&m1u-v@pyAz(YDC{7OIfUBSaCI)bW zYL|c)Xg{_y_z?j@nEgCRaY7`)UZK+D*TmYXY)!xvCUZU3V+6hULo~a)OoD6F>>>H& z7eGlKNlA%Q;##JM2+(dEH^Mk{kzqF}&KX2Pd)AcrM5vbRxVuFY+5m8Pw3z##JA{^+ ztMvR=wgkO4^Dz5x#kSHb_I%&iiGII_6T;*z%VP~2wKid9xaM*gExC1E;&O80z#){XG@&!SE&=w9txzIo4Bqpxxg}(UzKOh6o?(8EZU$k>tyS( zkpu|+^Gj@dw2H4{Fg-z-FPBOm!EUN|-{oOw$lCurz+Gsr>NU_-N8Do*{Bl7zG5OW* zn)MqX@NM518QL(3GUf-o3p~A~#2hF9Y0K>gqi0DQ$J%GijPZU z&c%7!iH54}$iQ6a33dFblPVVNmw2TOO8dC=ieOT`EZ|H2Yawu7orLX7+J|!sDk(+N zMSR@6Mouje*q=Cg(-j=t)Y%oGs~bEqGyxVDm<1%hdr2pi2WneKt6XbywC-?ebp{BN zIvbq>zs+!wg7J}RkK_}>BwruKHIVWj}e_~Gc7MANV~t!OXHQ9Qfx2G&4n(02DpK{w~s z#^LOA5GyDQw>1$VYK7Ul5do3pZ;&y77XKwoCe1S!9&SHn5OWxUQzA8`c)6D>uPRz#7sJA z-%r!SMz_ouyGNGEx&!{Dy9Mxk(c+%pSE);7F2P#HwuTdK(@9{aUZY<_598n0|D;u0 zbfVh0Az?D)sI6rWi!JWuh%D?G*Glo8t?STgA5l&l8J`T^Z}K-Y*u9ax%6@FQ>Q>yt z^|rL>@Zi=AJ9UsD?DIdA2acO}Q-!gX9?GwlhIsEAoq$g%Qn8}BkU$GYV z9Z~T`?Mvv97eoNH!bhqQ*RAOazye3eI7f&uO?=vvHc(O4Th_9ZM57LC)=POylcWMC zfyAANqac552<9tUVN2vWxwDYw3-Dq|SL>PLt++Lj3jAxys`t1KT*;9*AVWmZvNemEbTd{6V|o&z75XTi^li}D$nRg@lcHS3DOqH z-{90IXquK-!dk7#|41H zo73O<5d{UEk*dYr+wNOEQ}-g-UA6b7BPV7cQLL=vDS8T7)oShp`5%U4fAfb254C9l zX3b(?7JWzGVx>zvQoL;PL(#X0@-*f(iUO$XEzCPQ0}cD=)u)sb21#5IT=209cFnB) z-7|eZ#yvfsm_GG9nBQ;ynS7_b?m+|ZxGN(NvVVxyYN)6|5QLO2sXqNPJ_n~xhzx{@ zg&*yjHm683QvRt-Hg@3U>kj-889KqTIigb34-W4PUj0kq&9R-jQft_#%xnIOLw z(mo;}-o+#OCRmVsWCCEd0}&}-{imMHh{?S^tdAPgZcF445@1e{zO*U5;v{BKV^51i zus!m32YdD|QBb7&F8fDZ(EuqQ5mE&3pDzQBHGv5914xIE_Iid}>Ps+?rg9Bvwyjq# z?V?DB?sjf}jTD=OB>qgdA;LjPM2VW=fD|aRO1yH{1Uu#VBr(D7Q|4`+tz4pBwV|iS za0DV|7dl@y+RTo#I|dneysNNxAaqDjWt5LP zg8_TokIa%3+#Div+ z!yd`EELHIg=VXd(U79L`cLPe12&k*$3saMXNCbZ3M848XB~{-q`{^qDBkYgvL-W1P z(hSJpppUi`zI%{stihxnOnb}{$PY84t*M4NUd*6srhv(3tJ z;aPK_nuR`Ur_b{_q2a!OWR2ic226tl>6e`OsPS0YOqeorR?<;pO=uh48mUcV_o|@NQ2TWtmn>*6ti|x*&7VTxy9=xk8#d4V1nPGPjwG1zL?G9Y z9rb9Sx{*4Zd;uipg%}h6b}~gI90wO3?1br?cp9Z888S#69pIS9CA+7@7LvRAXw*UZ z{oSn`Du$>fpp5Vvi&hQZ07o66!4fe(yfNEje=Y9P_!fts1MKq(gl*u6zQ6a|&cr0{JwvR5^$k0HR zWOyFO)`|UM>!h9&eFQ$*k{U=+azs8BH%gTkWXVVXs&2c) z9LZBs!VfLqT)6OiPOmPne5-e0Lky>*Xz*QakkgNR_u-S;vRGLI`ji#XWmk}tbF-pp zv3YntI3)mqBBn{?Bhv1Bu8g{hI|@M3&|qn${vhws1<(8|8qFFl2Wme^CoAAflJhWMSuKr3IHUv~BT$vwn__fVVcm&rA<(kaFd+}Tm z*_}i`6SEm31&sbZ$>9Z~#1K&B{qj*=U~)X(sBK}u>&sS`s<;i8wzjsmF`|Cb7Rz$D zpz@11=}#;>%yz?)j8v-&WEni0usc1tOQ~<<_FY3?6G&1XuP67lyU8FO>uY}kRvl>T zDVA3Pai+X^kr+RVOv%>)meHBciJP)!`XPUx?jA_J!w*qS6B>%=RSflWy`s8wu zR;F*$f$JGxsdbL(Tw+HA=`vpcHZ6fq?9#DEP$!izZo=@L6K2o$`Rcvz0CdDJstgB| z{Z8uWTy6ZOd?oyNBKuNl2CQ4TZpd2h%1?|V$YBC*q^RR8-b<5)+Acf`ho_E*w9ayD zW=sYi*oG03rSV}JaVDWwe_xx4@}~`sO{1R?5fAUf?r$D7H9tat1R{u=0TnfImVFxg zX>MwrT-W`zE{$uhIDE?Nt|QtYZ8x|i%{#>T6yRhhzWpks z5%t6tICl2u(|=KOi=;P>eEn5Ff(=%Ac_sI1f>CXcWlGy-AvWoG>B35n2&>4I#R$k+ zzO=g+#P|SWmn56KlBf%3Kd!J@*I&78wYRM)1UT=wye%4jfVGy=8yKK^m1{<-D`etE z`bQwf>BX?|ZS>&0C!FyCzlsw4LSSCRm1P7XmP95u+E9-I{=Rg!{)Rb>>3FSFxWUvn 
zjyK8xorqk+*Ly-B%W=_ae}!CO7)X`;?`@dj@V?*qDsQUx$-%gTd=-@G_k$21wY;OQ zP6v{PUWe%5@>C=V@ZqO3-jPk_>fxNJo7=l1SzR|)e$FZCc(U*v=`eEfAFsOhjHIB{Ai6J}dE%Y<-BQ5j|w_ zK5uo#&`xHuWuKSslietW;O2rxkX_fsyC&$~OZ!UJOAU2N>E&ASntNBzS2CF#AiR&i zalFL==&x;H?b;g+H=gaEB&cWX;?&lwfOXG_QZ03H>)01c z9x9)9{=WZ)6^NMDR^1q-l5PU6=e`EXOnx?uYYuqo7Hy^HNiDiA8+WM$PQq->zj;BP zdR7jquuW?*!W&*UA+wKsmc}gF%08rh7ZFx}=DyiMU&bbOSd`a0`Y@_uY4YZA)bRH_ znTm?02w6HYniK?8{8D@AxFHDa9&*S$b@VjZtD@4Fx~HW-V-Eye;LOtq^3>{x$rkL? zH18STojRo99n-xr6hn;}h)FEdYUHZQi_k?E0x#XS8wcI~%J1Pj5>^2-%;?8o1T?kC zozCA$J*E8i>q1j??$SP1#(tZ-z?2n|)SsP^{RCJ#n;6SZVGIzN<;3Zyt4jin(@5`Z zl~$nvI-J|4(TYd0gqY8EuXmryLW8;m?6CxLT_7b{?xAmJtCHX{`}sGA_VO~d2QH)W z&Du;~2l5D*Kvb2Zo%dnkoc1`|IfI zUw6`lG$|=*(fOu2sB_F@dDXL?-VMgiW+2|W&SrA)r|FGupoRim&gY@WB#Y}dwKPFf_BYEW_q2imz*O?iCL!VB&yNjg0JAy*5R4q`E#$d+gX~*{o*1 z)t6XVve6~DJ}g-zxr%RZ^kdz53k{Cg8Z;)8gfZnS({ z?L{3u6U5j=p3zxdK#UpyLJyZF_KVTWOE>gk8kEz(z6NTv=SL^;IG}~5C8atpK!JrU2|6uipXj<)FFXN0?R|UPK$z-lJ>h=B?|5-kgEuW>OHgE>B zJhb{N>xtg>V|EDHfOU~fwdHS9FBgRL4J<{bn+^CeGJj<*TU*d=>Y5^10(AS2KEJgV z2PZ0*l&GoJni+%0r+FA&=kGhK>S!`XD{^O4#ZBr}Gpu3G-hs8n#(5&1fWCYQZHx0T zQJg5~K4X4PZcl%#rPPbO2Y2Lp5b`L|l zJap|hwg+gW5~)l%>;#FDEzX-`f;c%XloIUuw)!tB{V|{d@tCy!mtZEy z7y4f#oj%b29-7IN_?TWc>CQz40G@tQ1Oyp6%u(36g9@()$iwS$eEgrf-)S||N$+NZ z;1wtSD-MhgK(#U!&(?J1}~wmE0Uk#bA&vkT4 zM`HFEv14WiWLvI49acQb0>PuY!H1P#HcdN|4a*F3JG*|=r4MNs$?74apqX^Hm%sCb zl?}6_-zYfE-21wlhC}lCLoNF`K-O7_$Ln#&r9xM}q)NMSPJ^Z$0Kbs{G%atrBn27H zIZpkF#jZ7!uy={*(&~&F4F?1<`{Ju)D}}3*JerirU+n^(=DZSD_n@jr#onP78DL+m zf!gijR(Z_y@A5xisPQ!@B(Q+w*bhGT*>``HK4JQ#IQ+Nxp1+hhSKO?YJ>P(=Pa=>c zZI#^kP<8&UugbZN3!}otu4}|=vccO^w7D`LlsZ?~*l4ycitbZUJ^#TyXJi3Yn^<{vVY?IMnal*_s9?y{7AG z?A~fVXg1;A7Xr@eR=n-EfI93KNDtJXJJ8+%G(t}(J>k55d(1Sa7<_|ioZ7J!0z+C4 zuqI4;B9=H+GQer(x-kLmFUg#=u2=NohZto9fRT#;JRlzZo~j3>y)5O{(8|#3Q70z9 zAgP~)0opU9?^91@AJ=|Z%vhkoZYW;Tfr?EYVB(#k(^e3uQ9Ci)a?8g}8k&_t7x&H*K0` z?s~~2$XoM^H{A0k-!4I@w&`83{Mo@O7As4pJBw&D?l7S_z7s0*xuv5s<((ri5UuMB+fw(aZLjSFJUZxHy!q|~Q;x+c%@koisn($naLr>$*&?{k*Pw|tMR z2t2MjI0AmF^sUSm;Eao!q(nr(1|kQxq?K}d_S5_jZ0;8=_;)$5Z9}TCD1 z`y`u_Q%|S0{!2Txga^beZ_7;VFi%v~+s)N!(d|An`BIhCkvB2^ddYinvpdsMtS@Vd&fF znw(UAd(!)l?A=tFkNBUzEcsq7`?`;R^Qo@>O44HS9EWEe-L?o&A?kStU(xa_qLxyY z)*fB#>GiLU`&?h5JnyPv*6lKPh8B?REmu>Rv)+nUs@HE?T>!EG-jo*#YI3q<5eGXA zCfX|eVv6bl4z`INQv)Ekx8JDVi-$KutW)cn7(=*+t}@z4Dj7&aE9>$AciIR^jnTEK9( zQl7Q^*JiF{^k3@lYsvJj4;dLGztDS8yPNhQn~A$H$X6Dol357*qq+=o@_)JeHP-6f zkCnLrndugrRj>#JzSNgk;6@aDfyTvD{pJ6eqA*2To>OQksqr>%4-NXJ*$aQX7IZO` z>pym)SNq_KdO^wx%Mwh;5Em59_RRf~9 z{M^ajyNm~UWtM$3$>*%fZ`urBt<%XIILxrj=JQp9eWo<$_CV@5fzJwsrbkkJ=^YIn z8ElcCVY)_wyfAn0X)L7S&a$nbx~M3XypMwq!;MOzw#+SZL!cR&iRN=8_!{7JRiy1! zeHH^L@+|q&eoLaNkRF+J-R`kawi9tq1WufTjQ8>9;=l5`<&(I`{7txHzi8;~7cz>~ z(zXYt#gBV$0Vz7r`FP_pTEA5RXUD(f5nXUk33+jD$P1Xf63sq!^M+{|nKz}2$a`EG zrpgQpE~ym83Yk6snqYz7HvEl;hBVzi$+d48eHZP?l6^<>-l^IoJ-Xgv@+IYsl`}5Y^$?fp`q80-sv6&{N$f(h8Oxpx za)}D$zY$(Dq6$AGY2K4hXP#sbL4DE>N-1-yW~ZS5LM&z!!+;>rXr6R4keYaS(1KN8 zPZgJLdF3jgjnZXOO?lm=Kr8o)e^RBdbj--2cZkm!i0ySLIMinec=OOu3*<+<@Kt2Q zEGTE6k4RN7jsqO*Nd4XVpx=qDH4`m6E0iB)Se(A5o50uLI7T+kQR;DJ`pYWZEOqi* zx}fG{rfc2nhuX$)DVynhkC2aukXjyKKtEe^R~OdKQ!PRLmbW3egp!-Z3man3xNYVR zuAwD;l=YfM7r(9kXk4)ie^WlPpI|A7IjEBL*_&X2V!kIpk>3+6AijRfQ0$@0l8J|g z*@0o;1+sk-p58%o-K-H^pM$No=*FT+WKAGge`eGqgp@Ol>}t^ILRyVUa|04%xx@itIGo!Wcw z>Bs)G6J?_}NP|s<9=}?s@Wg%I>CU@J^UWKEDf3;LnB(5?GtVuUVWW$cRf}FMA#>!J5gTuggfv#V_zCK>l?xX#7wA?9ta;<&tjiD+? 
z>jEF#sBS!{7n(pFT-UfNI}g5B;yVXEwQ*N$1{pgRyg_?y(e2tB@R=hWvJseX9l*j6 zP0c02@2Lb;M#Cm+@u;)KtoJVSX08!R+Q*JS!UXO@F`+}U@!=a=726=#O z)Trw5A5xxcgzBeZTeLC4ztj_N&A!!M@)T;Py@f_SiSFAGJsyZ@s!G4`c3Al+UZ`Ru%Ap< z_s-1qIJDmw0D6Y}U>evPV5;H?Vu)0iCC=FHCq{04XImtGzvDL;GIXFg+$dmbDZ6K4 zz0=LZisCG_kVAg3R-X-)9KtyUNN^hZsV(2Xa5|Z4FdfwN2&iQu6z4w2|IQc>nviDkh zuC?Zx^O?__tJ_y4QP)~-iT8GITk?rg&lETPvj33sUjuZ<8NKc7b>7u9rf_eobYIL! z*6^g>8LQKA@q76EtQvo2ZRHayWVP7wzcPfo7~OwFenPDXefaMZgj=TUa}yT($F-o@ zcS#w2vr;Ph5J#srJn2u?Z!x4(QDk)ahVsOmuW!y)T=*QLcGEmKxXOz8MsOsgjxssJ zK+>v7O(*R+WN7*8TmePkRApg-)?*!|Zp5tbZQoYxryYCnl#**rnJus(VUYocRH98MAe;kT*)NS)>7_!$F7uJ2I%vyVgxN*80R6vT*g+=Q%9k9a~rpX>4g0q4bS@=yqzd-szBSTAj^CmrT2V1TaR>qbh-sc(=($Y)(TDz|q zVnkxRUM#jhU3w}Qc{p3Qiv2eRnc;|5N6$s-<^NFmisDW^X>a7 zC&q06EIorcdaDH5C`~X0yKe!xCXDB>d2Zg*|F;rxRM#9zR&LlzQO%oW#QLX*&v<1M@5Flb3 zslNLW+M1fW733BiX~nMuN>5&hV^3NaP#v4KtRcL4O2~m_{mvdQs0bv!e$2vKmRT|4 zuic*iRkElUTLHhY3+wX8eZ8nZpu(F9YhB@`mVP&aDh6sE-U}LF|3-Yp`uXQfz`0pY zw#EYh|2i5gSmU9rz2V4jTd&klG^2=X`w1j|;@q^ma8EEBoN;ZhyYm2Oe)rN@{Pv=| zw0JWl{R)%y6I?CSK&JODr=^76#_0Y@ihn=tUGPVc2>lTm?YR_C^j%Z+T%_gCJ6HeP zB<5V+f}`Dz+X=#dXavZF;pBsX}hfhaZ=1WH*Jv4b%u6=wo5{#HYY(>$7YeWXTlgVu}BQvvi_>! zIPpV18~)U)(5)IePi3t=@H=^VcP0&%{lr5JNL@~z^*TLe!A`KpwXb;Z#xD8o&0gKT zB~XHMuQt^z@e9yS^Gl?62!0D)2j~+}o9S1Z!reMcIu-9%&j3M&C353*lS1R~Ry7&2 zVOxwNg1q;Fb7NMNGitYcBTw+mBD_eyi91N}z#lxdkW#=qKU zwp&(xW*0g%{yde}+0Q$$1jAb*WclQd^Ayt8RqXnr0&>{@iyi*OD(eVASwpq;&~g?? zc)}bd-OywHGsWMtnMOB&^rUP(=mkzvAn^|x8X3Y3q{;!*QpJMU+(yT`d4>ilF<2L( zeUMoGNyU2j`=gmLD11dek?9#gTT!Zu@lGnuH%2o!=Ds%L)Zp4;`1GsJ`xVna|g z5NHUB7K6K~cniyesDI9E4~o!kzPw+O;(1qY=3<`Q*91_JOhI-}*jmLlccBdOwzC#} zP1n?%G9fsl+Ol?p(Zu7HlBY;~(y+5!LdkuT8*ottQV{h!*vC6&#sjk8uNp*^ekv9+ zS_e?igtH%t+#xEWw*rmmB&{u3kZz=n4fB|qjc~?|fj!G~75$sx%61dQM(>YmqQ~3- z0>%cS{O;l82;@i7h{sPxt|5D#A%AYlnx(uOSp1cMvLgO9Lwkb_YwQw$ zN{z}v|UEH05;FUT?}_sjmr(d`l4SRIUTmP2jr(Nr+&~BGbTTQv{7Jw$k01&HcT2G2E?)IfE+fl#pH&H1pkY-vJ4gd-)6oCSD zA@7cxA2)DYHh=5}bN1r3rd9yDOY@K6cdh^fFBc=d@wg0k?#Tb*y#;UqgfE3HO5C%u zknNF^jPi*Rjq&Zixz(4eLC+1;*oZz#?mUCO>OV z>XNX!-v5;RQliGXLG)8#Zy;F{v3u;;UgEl|T{k9&n$=mxlm!XuY+Qc%&8hFi@S7cy ztHx(?6;Sh6{dP`!T@hPUkLAt-vr3vI6!Kwan;!47s3Of-q&1U{iVJl15*O`~fDjrk zmCaZK35jJrXI5GQtR!;Q1|0rby%q{+?%`JJryc=V>vKmz6-88=%(XOP>(}=s$O!EI zTa41mQfDyE@XGF1u9DdgY=DQBdLTjtP&tM;ft?&AabUow1d1o*7}WBNIk!fGzMBk5 zVJeW_t8?}?HO#nQkx{G#ro!TFZS-jNpF8h+oCgZ;gT&i)M z6jl84X0H9{beP{mZ7Tzyke_9br7n=zLO-(yE|noR<)oQAoQQG#_Ea4_lBwh zm2FLLjXYzJ_reZd zRo6MGU>4@SuvQ^eH2QqN9H@IaIodl=avn1r3@EaX2-<(%>U!m~5`6OBVSPhq(?B1p z2ktF{0lp2JTr~gF)=PX_-1OAl$9pGi8?QmPkE~-EvGpsiXOKv;^`I*|r*2pwG1FNkVTJ3fvy|h;&c>1up3tz%F0xl2 z$-kKnpgVW-{EJBC@@}ai*L`I*wS&dX+i@3NZ@Y>_vj&kHL2%cM$UVV@QA;E=v0sZv z8&ihPl+KSF+uQM9{83a?UXH?-;rv^Sk;v+ou0C<@6(wK2TF1jDZCt~_2Q+Y>Wdjwh z_4Y>gz0bSdQ^|L6;#H%0VTEqsMRool9)6uj}4#i*Yhwd|=$eY#2o2>@SYHp8RkhvGl zvJ*QxvtR7?o|UdJA+&%2Qq+sTopND1H!br)&DVVv72QMm{Er}HBJocDLfPw(Y70vr zn`L3~$QvfBdOiW{VjJNNUmHtpcrrM$6{sPq zW%9M*etV%#uuy_ty33$sya|Q-;4ptM^mR3QyP`n5NnLlxZ-c@6JBM+9%mBNaBzA2x zwYfDXB13xsGkjn$`l8ngm1-~T)k?l%0>C z3+|e!C&i^vWrTcv6DcULyHOs1oY0^vXJ>zZk4)81;;z@g6xfoa$G@AE zxq+6{cCgPr)*BGFe$3!gL@N9YFWhrqstj^O2lbemy^L$Dbf+lm6?>7uZF9{?r z8q8~^G@4siDD@hosf%4#xb|fAO{(0;9n5(Sq?_ye=VD~tBY6jUuqzEvHcpI+m7@1l z;^OxCh3sB_<)YrCn8Hx0c=|U+BD*gd_?+)(9DwxwwYBRqi}-jryDQwyE}48$Ql6hoO0}7TFTE4+MgtJHtPiTZ|8*kMNfuQe~@MIi#4Q<~CX% z2A@SWbf;Ad-8#dJ9lHfDBapD1b^rAOm6)Fsj(JvFd?meVtKSdwaX+}CWfNsrTVuam z^g5uGm@3?#d>Fx4u02$BEcwW)y-Db~2}1JqMHG1?LIK@LnXf{?-f)H>;CLrd>F{VM z7!nX8)`X6=my@>1esJ8uPt9HMd*lm+at4VI-Th;D&tZiXT$Z>fZ4#}S^17nn8F+Ri zMtmiFpCi09Ohn1I5RyA8G1?ig*|gRTM_5k3;X&TWsl-&llHHt`A!*;6{SGjoRd4Gr 
z`)$T@QBU)=ZjW^gKIV!XQ%u?z$1J^#%9!M_^xzf4NByuG| zsFwRjuJrsTp%$2S0K~t19p)o46K72MsN}TmC1K@grE6J?;CQ22vWU+mPSwx0j@}`{0*T7c%Kn#;?@UH zk6*uBjSDR5*(GUx$W}+3sq*ezWFo!xdGUmCv~zu-&YNV1;!}-AC|?t?}Kn@dEn+1pCyS zsAfD;mkvdigo6x*6LY_RafDP7Cblaga$U1KJKpPQ9XaCl(0C&-@K}OnCSEV@E!6W@`#}eKMUrZ- zOrTHlNtIryP}qDh{OV^n4qw-9Q%X-XbJf@6gYxo7EkWanZ-^2WXJ->ZWBj~SXL#Y36Y#4q=dMSd6y3#T179sA&bws7Zn84d^CsJcBM#H59l zHsl5!2((KS*u}c=&+O_wP#xkK+G1k_RfBD`48jK?aW3^GIsO*Q(yx**+;;A9WxhA} z=C}JdVN$_E zlCO4B$S#-fLj@~;s--Vx`uI6J_FdDImsvwsi?%iRM=Xj8pS_nErS&e%e=2ab&f0(D zu{L88TH{5oP$8q&BJ3*S_-crZM;4y0zCU zKztRLtH%$TvW~w6_h}~brMltJ1qyO?HCA~Hob}sZ_neQq$0A+_JXtID zBxvJouH-5mZ#Ww}E1SYBci(Ah$7lILi2d}laV9Wj_}p7p##P;}cx_%A!{sdSScDMa z0a#}GwfJoPyn)4e=%tQy9tdH~IqTIb__SyLF)=UcK{&0OyN)RY*=aRpMXYPas7%#K zf6QO2MH5&pMKDYw1t1! zao0}8iWFzj#Oc_!XvW?9GaZZHCHW>TWc1{v=04+{QL4+g4?POHUWC^8nkUCK#FbQ2 zRvJZh>B(=Ey83n}s46twW3hq&j`?Lzby_PcRv?38;Bz+mJ`p=)rl0G` zwQKi1roFbUv`3Z#j>n|cD%m4g_Cz|YU<_uAO!BjP7Vxxtb}%3I4(ILUA+VN2XnvTF zEn;weEyZfrf$`{Pau2$UFk|tO;6+Zm_ zb2XL7Y(Bs=`7O=wHRK%NGnge~sjJ>*q>_S9Ja8~f4OmWHrk!q^xF`D6-UIVN8NsV! z*8UM5o&Eb@br`xKUmdX{e?d<)bG71VgFp>e*JV4}_!=B3;z<`sjuJANo4swIOE}E0 zP)_&q0kkrr-~O@#&^wgM z6NtUx#YV5sd`%r*0igP7a&WvHyWrk|8UDoPJ$6?{Q7|m)*3CRmNv{!Qv)M?E0VR*9 z42Zlph?LV9|7F_@Y?dT;ox$73@Cm#3)Rn_l!HlDzYEm~Pw?hhBbHA4{@17r067fpC z;b21EOe5S{ITH;365k}LrqUcn@q6zkb9_k6IZ%70^WK+ihiIe6DQx~~nO64;$%bs$ z^VBlIl07Vfv414_k^g5au!SxppS>SX6>EP-_}f-@)kTUMU{RrE;F)sPudsV3=8e&T zw#g^ovM6UdvPas!c|38mAxP-)app-!e%P>EsQ#aI(}s(lEFm5&<(xAL|4oOBq|?Bj zSA^_*8X0%dsOQ)ZF_M359;2YGpo7Js6KOD7ooV>g`(}O z^6NP9G_y=^jW8#w!=HmV(! z67_7TIIz@rc{(WH+{8mVqm*=yGOh)Itrr87Qy`=IEmE();{M5^oWhcE?8%V!CT9u8 zNAJ#uIl(J`;KXDs8ZVotFMhzBXKN##hPDD7wB)U5A;MzTvh*x{ z_=f8IRwPEb4vofZY44=KU|RW#{1?UzM}bc9`Rw(XK3HKwio9;s*459tEVRyglMxWw z!`kgnVFF^tq~5VdDa0>%Nd{SSt}mucKHdu#E72y~OB9)UZ|3_GiZChj2P+b2za@!j8WCavxD4;f#hHF1miyICq3 z;!X?j9jaz~*|WLoDADl0yFpyZ%ydbG2b`0*_btS*1#i4|?NJcnjAh^s+08D0--T}k zT9d54<7vrfq&j^DnP^2JTvrYsY`og|sVhX|1! 
z<7-?3;v~QFkUs)Eec*75&3H*-hD7wt@SM@V#_2Z~tF&z7o?vv-3g$j4dkz@br%Q*{ zHK7%=C-S)JvSCQXje&@ZTr#_(bv+_Gp~3_3r5@g`mhDI7As6rzbT?4svfBjgw~%E+ zC-3zh-n_m$z{E3)N)2}O4Y>8%yUL)wz*)b__}q>1kj-(KHxhtAFUQztyK8)W5B%b* z%fEk7@YLvh@r}SkDQw zAf1XVq?7&dZo`1Cc7r^WzmT8?2=R-^4`yIdJ!wDn=(fYy*IzPzxB&r{;hEphz|M2_ z@~sz*G(o@vm{Z!3;x-=9y;5=OB%Wq^F8Jp1`Kg+_z{I+4sE*Hap#A#B{*wfjdhSE$ z>38zu?D*SBlI8zLkMyMcy?zHynYyY1t|oR{tyYCA_fBOZT=v}R*-7fGF>_qS;rUb63`F)PEwsAxA@8Dlj?yX0Xd2K9}jc*}zD*lhktDZt&zy z_)4a4zkyM7FjuD6S6En8N2RRj9g91)em!)G+*80c+g-NsxuR^?uo*v_1|1#fP2@gn zuI)>ZsO3fP2P3I!5@{9>pHW$bUzhMs6pYuXZ6Z504{Uq%kAa_3;;6}$GdCO}$>~Mn z1vSQ@p1xvxL(| z3%=VStN%z+|FSsIzpNMi{4qUyyK1J@#k_dOic_*)cTHqFE_YDQy(KBEwDWCvs(HNNqu%bS@a%Z##*I_wqTYtn?o$mXd1B-j0|*t;J>O+F zDlM+y_jf|aMmx`;$X~rc%Si6I#U0}Zoik&)0<#MY-oA$2QQ38{2|m{I+PJ=2gL%&F zKwowHZr_<$wOu}oJ9!SiK2!oRJIn0se}@o{ zBq#!30j;|usxp{kEwYu$3%-Bt02S<$B?>Aj0t~BJ-DQi)CVt!#_JgtXs}>}t)ndcv zL!Y2~>a9Uue|0Wv=j~yovE~E+qnb0Mn)|-m}z3*?_eaaguVA`V|Tl3SSwHs!Ek|irYyEE47r8A`5 z85jD_5DJvne-;~l=9VrhRY|;CCd54u+Yx7VL=GT|zohDEzO@B8SolI&R5m~?M~Ol7 zW%eJb7KClKQt<~OICj4fbvt_t73Gb+pf>e9DgyTQGh=31j=kJgCy#GDZV_XjuYJ3& z_G#HQ-Z&~>h7Dm__Q-V(r3d7Lr6p2Dq)@&(&(1JI7`>R=ZatyE#JB% zF#$`;^geu#1S{-mKP2*ATT&AWB&4!=7klCNQVTx*!{S_LJ@w{11KIN65l~6MSMd{UuIKHWucda)J9X3W=rGhEF z?_Wr}oH%k@Wg4(|TP!&o~bn)0aoR+Ci ztMGWh84G>T5We#^x~Eq6)J@3_Nt2sNeg|g*zWMjbn~Gw~A)A$XtChganp~-r6EnGc zPp?sM=AN(dBL-JQZM%fr%=7iFu+n%MJ8jt1%?5j84FQtCP#*ZMC-{*SWTm^0FH97% zt1H%Ft;4Z}9Jhnde>9A=biL(sfcrL?xGc`7S}Wg^YJTI#eC_fKJ7$+hPMH5qc7TDc z+JI47Jcr~}g_eeFfpHk-*Ia#K-6*kFK*f)K);1@L150AYuX-afJNa^UhE5gA{m5u} zCORi=0Cb;j;TPz;q~Be6Eu1BN1?b;CHYt>8m#tDMi-5?}_fll%Gu@o4w+m}ljJCLbk?f96Z*WL?#*lbo6$ zm!yep3l?>pfNR{MUWe4!97P_6rY34x&&l>1NKlV#Nu^82DUsy*qg~FHIM~g&7ry8o zJsggaxKfQ*ta5fAaxYLvN>lG~H@&e^&WrCmlLXF_paG^_P*VhSDhEHCFoAV@ zov?pUFup3(%*glx?)^dPf!zw~sf0D)}FRPIaj3`MhH_Y<#&ACTo^~q=# zg=f6v8R6!@U@=QcW5 zaGdC6f&_Na;rro27Ke+!-j6+p#DcKQ)`*bS8)bpiMNcS_zELD0PC^6nVcxD#h5n1H z{N@&LSx?w^l`-us4V{xQfB%4{sMoA@p`DV}vMbOb=weS!-=0{FVw97MciR37JCCP% zEo=ndqoLMvm^MZbZ9Ux}%GP8mo>43vgl~p!86w?~+*)-5ZGMU%2YRIhxcA2?U6{p- zaN4d*V$>IsJ1Mo|9*34l&UhD^^i%Khp=FdB4^oM#wQ|1${63PlJ0DMO#=1x(8LR1_qaFwCu3GHzn~` z6a=_z@oJ5mp^=o+yL`&&WYwsbZ`#qDbm1+ z50)8ET#k#J=BI{`3lz2pM35!7WU|!Nv z7jUNDQx(J+cKig~&v@cHe?hT0IPUEZn0NVpB9(YccOnoE&ezOrKHW<6e&^70vI56D zDc#nxdOiuP{LA=~1KMP=XfVrFrg{s3SXSP~pAA5Hwa&@HxYR`kyEIiQQ;pG-BTbBk zy5UBjmp(OnA-12YiXq|uD4_=Hx3_wY4jX-kBp%i=*|N@7a*8L@yCq>OU%z&Dkln`= z7=G$tB*#);o&-k4R<@7{9a$PBrjX-d@KggC_R`*urHqN+rvc^&{-rmU#0b7XXRIXjJq(6s91_Mj!FWW ze%R}ws?`^({}_eAVJp{v8&($Z4$91lDw9}n;Fs5MGqaC60$z-$Hd@JsVJikTG^t~1 z=zv~eGM2?W%OJ*q{%Dy8>2|}{Vl$p{BYXcrVMn5qxKdr7%w(OHy^+fpZ2t=cAQ1RR zQk~P+@_8@k#87Sed1Eck-?rfFJ8?DaaCd^V`Pw$XJP;FLPoX^CO-)SERv4=gvumU@DW~-e-If{JnTC`4mI%~de9x#diNIh9|eKpZWqEWr`mPkH8 zC~(X3%y+9|EpiOmuureb)>aK?l!M(F>`ZoFWi}~V$0m5suI87Ft7XH$EX$fOxt{Ul zG>T<9e9w}e_yBv?CAjNGH;B2uNr?ODK_!fa9m++4v$qNu zQ6)Hb0{cy5s*2B6P2(pG?4psD=q{3iTV>)qaO`Y>&>R$^wT>beSliM|>if%T-(dh9 zwx>4^cEcXd-8Z?J(jdw`_6VbAtUtn)Xn|*3{Hk9iY|z4G=eHJxIFWDv&Z81T)vd~X z5TgQ0kKr4SX3tdZ?O+KOYAdG*RjcMhMdRdPA!mnjh9ez*JXHV|qYyD2Xm>=ub$Tf6 z_TZajjTo&h-{dhtZdAwQNQB@)(EFqJTK|@DM{9emQ75HF3M0biH#uO# z7Kwf&hOETpDjFgIr-61=uW%6u4h}pM1L(C)qco$jK|W?-ab&k`755&}g}XrdSt7p2 zkI!vLd;QM_k7T!U=kPhGUzwp~|r>lj!+ z3CiIHqSAf%!gt7gCtm4Cim#dXY4XMr*g3veIsiNOqQO}sY*Z)!m0J-u+$Uls5&z*6 z((QQ#;974d-6xt2hOEj3G5(YPOUD=Pyajj~4Ud?@+oFV8I2Y%`{Vwa z^CNEK=?<}LO-4R-w{rk1Jc1_H$p-?#&8T9KNdAq8T|L4=CF;FS9g}`U_mboCiw{c@ z3t>W8j7GDAx52T1aU7^NI@|bTzg=F!z*>tVH|&;Uv#+qsPFeoiC|82*S5G4)>%=562ewXqbB*P-%@MDFO}Ff{O+B!p z(VvkD7hjd~M!!3-*@Pr^Zlrj+<<-@)&G6Yg70;&U5<37}&DIJ0%M3diC5OHKX4 
znvMhJp7P`xf!DJg-zt0AL?N^Dqn2^k$7*|BYJr^?#ox_|j@w!-6myD>VfO7z-EUH& zc(AsKI41jG&C(RU?by9|N?~c+ClUTY%`Ge%1{y_cUKpXY3Y2 ztQn zDa^OhnAirHZHPa>#K~FMTJ)1ur5`o`)Xx7Xcu2nd zk;%%?dux~lhWqs|cd^hyaJfrCqt@QpI;lrBni%A><)*!Su%SJif#rdL^QP*60JCPH zQ&#X%{AP@hXzg+UqIr-xtk17&pWNVi@a`wv29%;TK>5Kz4N(m{_{|s0 zAo(SelYz{$HO;|^bf&P+;(yi?xxvPwTmwQu8o#}%U3!rE#NgCxRBMv9rtB}L^18&J z`CTV&Wh(>H)RgLs=Hx7E%&4~az5FyM(~rI#aG;{58|sU66TZ+x%Y@dp&kdXF3>?tD zF2zteU0*n(r!LIstY3K1N_w@31wkWTo-A-;+FFnvwg(naCZPpJxU`E;_JS!?=U2f< zykZ^TXt*kNsi9l%SYW|-@19ULtqm@GvL!8fV=J6WtP@{=BD7=;#H}ZH=d} zM2-E51FBY9G+(KFbK-Rw;i$RQG6meaYjBGXAfnv>^l@mBotA0;)lab?e2EqBbz1N* zpyhEljd@=#|SuZ357CjTC zklh`N-0^>JPShQgU36c)P8)rP+%W@g>dMzum>nKt{#O_2KsPN;u%(Bj>^H#U^i|%> z2@-l1n(=RCYzBW5&IXJOIU$&g^EY(G&_l=V@o!552&f1J{ZBjM!T|cYIz8xJpL&A1 z0d~TAdr&xA6O!tv`oDwk#3N{@slv_RrydMKKGV=HvSk6F8rj6h@a*wj?tcMa_#Ltj zTv^9aO}<%GswXGM`bC z!m#V}G$+T#{&N9<9CgBJEw{oFufC7cSa;efM-!0ch{d zz>nXZDyKU+Mp}0JPapr!|5zgrYf06!!SGKdE-|iA0-Zl4O`AR{^Tyopdg%_?+U=8X zI9qKAXYPo0oc#OE_BquxR@q8Hlav3vlY3=AIu!~KJ%A54CEpI=DAXN*cqj0hzw+xg z7Vwh~=c4ASiKnjv4jJ%4*6W`F4E4wHU$O#_)B9zRQ|%pICkGc_X8HepQ@;@WMZ@0+ zKJZ9pfNr?_4>L2fXPYkrBV$?c$yv+W=TCyOKY#v=F1NM&oan{X>0?NLGIicS0Kxuq z8b!_mz@{fZ1ejcJZkoZr1OloNcr^oZy8KT#t=HXOQvL!E6i&T;vqMH5^lJzX0TQ$9 z<-34J1XKm^G6lqcjtn4|q6WU*#`^PdtNarB8-U$<>Yo5B_*_as+Tv`I|6ceLmWxs& z8Z#s8XvV@iPkV5DcEF@X=PfLH=I@T&jiQwl0nQ>BF>N|L&Q3CYKpa%+oKlmN+Rp-r zRIR$r-g60zKq=b1y0)1rjFQf1CudR}W?9Pe`xq;Fwa=3+=kI>%ZGyZuzs<~&HroSe zH4L)|#+}YYsQ=IR06N{h&~MZ1G1rpsb}Z!YkkkaR{~M&nDwoJRIffZ<>2NY)^<-|v z*Z^3PmZ%stm0p2OR%AJhJZ&MaXVMA$No7<$adgbIG~mpy={t8qVR&f92pSjO-{p;B zC295zm=ZBt{+Q{Irj9I+0}dM&|9RdGQq$+X`lc}6|AO-BxeRnXd8&yEyQ(Ad4S=!S zSR?jX!sLVf2IkM=dpY{Q$d;9=!JF7-GG}D=iVdwCJw5LMB<8XDx}8>L{#{*s)dj#o z28QNOPmUzNvYHSu(BZ64Er)wV)8@egqpgw(q9SK;3ow{heQ9-f_h&EzQngxAiN65f zM84T8rq73BdoD6fzZp~zj>F)<4HMdn%lRQyj zkVLZ3 zfP_lWRuPo|{K?;)Kcnf)tB%k|Ubdf}y6^dTt-2={m;Tm?;9g;W>8};}nUuMpJ4wRl z_sh(A|x(m(MX=4J0^VUM#XOB}22Emd@~bDRW#PTWS~&RczUXFQ{L9;^Qy0>?S=aN6YM zs5-G}okXK*4zS#p;ZRGZWyB?ZZ)ky&i@u@d*fyA|!^I~mzmxJ-=VCoCVdx1a+W0Z^wye^@CG0e&D2@ zqm(XarerDIWnq<~NzsTgVpM`Sw8GP$EPI5sA>9SmDtwJy|JJ%-xY>)9*Z5Nb;*mhf z$xpZ|dt$T~qiD5c%(I4K8%H_v2KX}eh{UekUbU;$o}<)Mqx~jHUe)DKebD$zV*`bX zy(U?7J-xGItP_`=GXaFrmrv+B#6=wD?*;hb7G$e7v<8!mOO-x}PQowy-2$ZRX40pjJ{+mcKsW|HamGbeN}ww~4B;4oaM8qKE=G8#2K zfGz*Mi{QNi{=?6dyI}e0xLc^IM-w@fiNuZ%h)J100YYPdgAH}F#~qO6%7CJ5%9NCS zK9$Mrl{fE?EVa}na1_{Vu4WlzYRPEV7fXBCTkAdiBz@)ME(r>Iejt#E^O4B-IRP4qvot=9ZO`9;Kxh0^j;5AEP@lEk)Ny%ov}H~!LzbJ8Z4zy9s2EX+?Pre**HrMYA|_DbkWTo^Sa-Cq}c z@d}`5`(+sU0VdYXa{wFB?60ZxKQt2nZQ0*8@=Ym3=$EZPG6!shw~>*Njq_5!jf<9F zZ2@}~V49y;6Q@qB`d@!f?D$`AzJ&f-%Yd2F{;Qv-PJIS$^XmoJ?Z4juhWPI{|9hMN znQRiqyjqcOg7pz=_bbb3qABgY8MsDA$l%`$Ogcl{Yu@ab@h73a!3<;tN|(-~cxHFr zo-jUG!ae&%w5|y)tB0Sb2i`miS7(+#wUA+c5r{Nj?e$tQdMv_baj88SAZLiMeLFW2 z@XyT|?hbXpYi<31|I!PRCa;cDlE3-So#%cHcP+2fz4izio)ji%!)sT8wW5KK8 z!K;y*`W+JObc(G2{_YJ^AcE0i1M-Wl-!=IGZggyOfRC^E&1S?Oc8A+Qhr|j4o@404 z@TKDOuS3MbDrX0NF#aYZ9&kjie_91ZI;abt`bm%9qok=DF`O+4a|xk_J~o# zT>uQHZr@`(?2-DAYqgUr)D{TA28!%#NaSGY{bC4mt4U4~X%y%FSjae=t}j3rJ85#C ztiB(m8 zgn0~d!-^@;cuMa9bP|y~RML(`<2w0Q`GdKj#MY%>)N~5~+ZCd{gw~^L4vV=GXWX8V(&g>k)1Tqa*9#ik57c#!gqICjD?FNUg5-EWSMb?u)OH8;Es?OE=D)a#26{ zQmJshmd-TZS%$c328r;lMM@w}66Y#s%9cY+iGw7ud95a74xE3zKK;|P)Hjs0)3;(q z@aQv|{19Zl{|`CoT_TW&cL&n67#p^X^sNG)Kgv~~N*i#2>#KwVaX(@cWM8K-syM_A zqgHeqV7=~8W>xdevLJSmVXq%TS1TL&*7UW#v%t0H-(I&A-QQgCTj*Q8E1(-;o<-=0 z9@HJsn#Tq@9W`*KEqN8tJ?5VCYRv>3H}B)aEIWVVZUy9kS6n2Wj~?8e5^Q>?h=+jI zC7nRmV*eYOiK`2I?q}tP>h%bKO%&kfIp$A-JHaeiBEQi%+e%GqCrN()43*|o3$KeB 
z+)CK2?$v2TOkQKGVA?F#Y-$$i>%+FdzDb+R(RC8IKzxJ^tecBOV(m9)|!@L`9ozd=(7MYpUG^;M>P(qz(I%KFkcd zC_g=t5=vXTG;jt#c6*_unbb7UG9NqE9ca1m8h@=yU)9!SImJfrfx%QAzXXs2+e|21 zv*?p4iITjB9%b~0IiwOo{u(L04oYZ4!xz($ClR!KjSxFc6g$rmJi->#@xG@gt1lK4 zulS;L$=6A_W&2i?#^zo zE~JSMQ}7c+gp-mg_NB(~t0gV5A*&S~4YKCI2>oWCh>cWJNe>GD)Cmd)Jde#1rwsjs zc+yOhw4f*0{4ru%5{9-?sr;Mta(Cup(xpM@2LI}72xP<2s=FtDep3!G_`mkOMo04@ zX_ERwQtvQZjP#kz4py(Jw@DBArbX^mINBzTL|gUrD%ExrM8cO!(lXavZl_&)KB1TL zqCZH-!phQ@_fs1q^gzBz^5A8Dr^r2ZUT{~6eVk{Rk~BcfNh$51%!S++15SL$OQ=QO z{E009;K;Qq?83kN(cMyCv04Ua^|eG)z44%DLdY^BP42Zuk@>r_s0`PLJn|x*w+OF? zFG>`8is*j1!Epjp2M)?znX}%7{#)}t-Zwd37(5-k|IhNG0dwyCGAD;jlZ5mpfl7fy z>G$GJ?-?628{nmRz*2vZ9#LNO^z@A;NS z0c=_0_-a0&L8iljE=9Ia9S`!b@O7YEYLli#)+n~TI;{pr8+LmT5g{^%t+Ws?RMIdi zm05;R!aM7e%TCxFGK2I*`5!Kwq|Tlsh?yRL<%&ogofO%#WJ$omg2R{KTVUPhorsF0 ziA9b@wV`}}pbbq@@}fI6IA<}s&h~bGPY6Zj*j5OPm^#+9`4$0!rjXSve{kxj?+#%%3W%<5SOSBj!0VPsmYM~h@){|G>W z>SRpKV`s~t@eW-D<}OQCB_}c|U~JF=$io>bMXynW%4nt+?&u zuQfKPRP-`*Lp*)+%L(u4_)+b_{N((G(5&1|2~8il!K65rc)sc6nGr_YnOVKG1~Ff? z&hCeok;LH$+CsuHkk<MG3QbStG9|$bh2rU4s3UJ`R0aO!nt+;qi z!8=i+1GB>q?yF0jATZqk0;5rz=L~}I*oP~eo&oq%ebYK-x5rjLgKhR}p_UD0_kXRa zkunu1Q%Zef&I{LDI6dUHt9v`=Qgzd+J9F49z|+&%HKe}hU>~MnyfE5l?JK<1R@0x}Gc|#Z84i+C_q)|z~zhgKH-z-4nE7&XOC?Mq+?1>X%PW3vI z%Fh0K9D{xJ>rf@_V7WR!Tb2Iwou-~p->s&VPG|s>ZMg>OS)I#PZ${)_=H@v-;+>(= zv&2!%uC8skqLOOOs#}GG1&$<*3J-zq8j6AvIJd@{ar7d_bxknLFu!RX6LQU9q0i3z zo21G1N;y60N1dXJpII1K^~S9BHKbQSIo;313Ug2g3^S(9YV1A~LF+J!`+_dWs-@I2S2pU`KMDq_zVyjhWW@ z8K0~eqzm6K%lGrnTdjO{H_a7R=LfrT+8ZOZ4fHmrkqu^BbH%XM_rFDcGVN0E*P+MS0d| z%Sn(C2!(K#+-E<>0L1YiDTb6NH-wDhFhmuf$v;^UxmhyYd!sw*%Z)7IVrK5lwE$|u zIw>eA6*ED-4Dea7xm!E6`@PHuzy=d7J@ttn?eHPX56NK|bl_SlRdpQWJnU+us#s+V zWgD#0&JghCV=l|q%*s1i4Nd=7d*2z>)YkoZuZpOsC|r$FL_|P9h|)U>A|({1_b63* z?+`AcAiWdmMM4WjdQU{ER4D;zK{|vI>5!0Q4(fg1|IB}$dFI2E4>O`^2Ck0@~*v#o&%vF~#Q`I1R8>$4zVMse=^a1LXYlGXD$8gv>P=BpauOrgY;1f)% zT#9-5lhB3fHcE4ASFcSgTG~~;puV4WKkO+}c;x(ko>6$_)t$me?b0R>Y_=fLjfu6@ zo|nh|n)W~lwolT5LGv0J8;G}e?S-u2ia|?55U?;d_G@)j?w;#(HH+pjk{d6Ohg`?% zTUuKqjPNmTOyI*649l}g~7B!dFKt&efs7-YT zGJzd#RORXU*8h3bq-~V9lzYz@Z|9Iy+d4!b{0|0D>Z=_crHzi58qb6C6d(By^Ni`+_9fdbzQM37r&O78AM^DsA9 zgcVSP9n=5O?b^@wGD7}rDS*ZV&gILP(zm4W73Tcq2Bz@~`ji%x&ZmSoPxIT$TSvz- zH!W@+*nUH!Z;t^Sf`D!VrMZn(9&6Zidh(*@!DaA_vlv&O2eS`9DQ~l1CEph2j&ycy zWCx&2grku2{j)Y93J1ddlOx>J{zp-DTI};BzF%TDy9i$KyS4e+cQGY*#I!@QRrZVk zCY#{W+=AZm36t#t_Jqk+`)?+jFtB^x#;#iuqoCL*LQ(Mh1a^HZed?VdfMvDW?ud^i z0G#n9+OCxv9t0OQ=~;yK(zNGJI|wTJ6;A$!1Dc5Rze*Rasq$$MhE%Qt{a|WUM3Ry$ zAU*GZHP~uk90SL3GO&E}Fp8>5lHSesgq(iPe^leB_Q3x`s+(88_?n8fvSp>8wTn{p zO6pAArlZV{(lInycJ=6erp4;3jY&;bb)#yRx=zz49`iS?R1!+-2xSwD;n%S#L+H}p zonVLwXAfmJzclW8uD5m6B+^iQdfUCn8Td_-hN@a4crG#kWzCwtAE7ia*RrFKfDiIL zld_tE55Vtm+cZ7Wy=at_S#fPX!IFdtU`9rlHWY5XmT;hyNB+F z<0MT)0H5Y&vhVQBu__@;9sRS&aTf@QyG^@j5Ptd|V?TY5`E5-CE@nvUA5}V+{fnZP zwEQ}OymO9ARRN%lO~tNArI@JQ9nKPQ_>_blOt)2sbrXOzOWLK9p?ZGP~t*o`OD?WY`1)# z)U8uQ4+Q_g8(qP6063+Z8?v+g>&!75uYTZ~UIp0krbGsW(HE#l)CdP#MF^sm>5E^0 zl^W({mW=V$qV(WQrf|TIwy8mp6*o#(PWK6?B1AAXC|+| zZaw8~K&UG$q=SZm8U(430o6r-3Zk#=P7pGqhWnV$yU$k<)c8LHjbP%*bamR7T3HM- zpwmU0Q#GB=0PhcG9Ma zy5xa;D*Zlvarr9K!7>ntW`YC2h%XUf&w7?Y0Jii5{;UXKB>O~l=#3xVV0t&DGwQKC zvCxpT6qLSN+lCmJssbB3P^-u!b75?h0kSYhjIDbNY@01#hsUhxlgY7Bl39O)-VWd! 
zP!tC%K#bG-d4la_oUSslTgh)Rk>z$BG;Zzqa>Dzxgb0n2xeDWkIs41)dM<}w38D0y zSf|$I-0VD;ele>VDPl;WO3xBuO}f<5u+cKZ=J}=lu{7t7HpeNal9Y_a^fVm#WoKq( zKhXBVSuB7U=&3|MEZP3fETq%`|3Rw z)O0a!n)qsQROw=Uttz3zdpUi0vRR%qhHBT!rE^D`hL zQ9+-WrnmdreN`<3YS?vvFKmijBf`4!eexuY{)4*d-dfLR1OdebyqpLzm3h4r7mcg$ z|Q^66gu%|2P4X_4o1MCQLKHb>`f?ru^OfbZGSZ^ zp$3m?`iJu|rX9B6{@AkG8)XaxP8^hjgoy(LczW`IPz z17u@_+fuDcRl5z61LRCZr1BR>Hq>TtcAHh;nOL&$pWYu)2qnu+X$G-Fb5b*-D)dS1 z2aBiLcVJnwPM*`v$(d!k!$jgR%kZd~ zQ+(Il)enJknrWa7kcs$Mgqe^t*I8Wi0YPez+^g_>p*$f#j1R~U129FQfo($p$5H~J zsC4wJln+#kca@_62kt^^V1EOVAN@HiEWY$`^&h4s6@H~O9w$~}dRTcD5as!b=BRe{ z41S>FELnUz?IVt)LcZRa#RAa1?10p7zk17gk(HnhG@^(HU$^kiD zZY2C8j0bB<&Z@Ip1wcqL8#7~sZe*yi& zM>8JY?yHmn)0#q`&|b>hfdJW}FF^Dn;6Q>>9E^19fzR<`?15744nhc`&ctIJ(ZMaY zDKZV~s`fFM8^=BCMn2{4tju&UdL&|8o0hQjAku8F*IH&}bjISS0hfi;X;{=d$$B7S z=BeU-RYO|K&~yP6kj;;B`DGGZUVJR}cdEX6RHjljV6EiSbbD6PB&OSM?LDFTwRNJ> zd}Ak5K14xQ{Jdv`N`wX0LN$HjaU5SakUs~s zH)J`*xtN2m{l)6&uE^E(zGGazn?FK*at0KuR-Ev#P~WC@2H?(1IqkI>A%p(P7fa5R zkCFM3jmF%wOqpA*V@kfqygtb@OeLoS^wZPXGtPZ_zrDl4*S)E5DNh4GXIh0SUO}yUD;U~?7 zgVS6OqH~=`G5Y=_cNVz+J$i&tlmFUjzA}k#P&U<1fcF6MmRcamOGX? zNHN394EqbpY}#+4Q<7TZd0{0ikhs0^uk(-8=@(wo zwVZLE$4*~c&I~BMuOsD2tg)yVju?o9WGW;+Nu-!g^759O&EiytoN=O3UcpI*|dD#R?X; zHGG9%C+@pR2^0KZaHzm;b)#McN7nKwL`pVS z*7o1vx*=5GUk1re8pfn*CGV2tG;&{@LtyTTZNqT1`?;ga{n)1fT!?j9>^P6K>~I^h|zNx$-Mk* zLC=wvc%*v^EPgO$MSp&+_|dax>nCJYyjjx=Ng-`rNZIPIPD3q;`RO&guW7s*uLV9S zrv`5E3*36WCAezu*yxa3dG~tqt(oI~PvIGt@V)bYQ=b1E1^NOQrB9r++oPJ!?%KKP z2v}7sU89%&r@jaLnh*$>kopdXil~IE%zln0n|P3T`T^J`Ej z&^BjBe(T)bFe%`!?=%shh)OH^P9jgu;#O z^lCXIZiLeJ)lO}HOVs5{c>ufyq_e3#?9;CsZgqMEhA4J^5kG%U{MX2;RU=YwqUdGc znbHCSc+vB^(`L`Vxsvm1yeE)pxfM~?H~02|5C0Y!)Ht{zu6B;~|9|!Wv1J;seG%wR zcg5|OETqbcm^g`av^B6iEG-V9#=ekJJ2@ViZ=IWN3+X{zE-LpX1P&T zTpX>+=eWjvlV5a1yEZ2ecnuD#eWnwDp%8}iR5xE`u`LL}?sveH1_m1d9w1_Jj&`&S zC=8#l`uf%e;OqZQK{rbT#^s^JTr;p8A$>uP$)@V(SCO)(4P--Cfilm5zPu(^8Ri@H2ClyYtSS!s{9f z$UnzS3w*jKKXBj*3(++0K^G-MGd0mrU2UuPv6J8!uMNyZx+BOIjJGi~sN#$?& z`6|5a^$&=M2<$=i;w9*SkE?c;yh8C;MqgQ3j$TN7$GojSJ@lNa536A9gO9zlh4!lY z8*8mID_!o5#2gY>qGA2oS{fa{7ADwchhXz2g{k88UouPb3XFfr>bRN?AKq-#)qt4d z!oJVBWL64W(7|l3ZdQQ^Q-fVs1*+#q?zk*mP@4bZQsyVgGb%B5#u%fpGkW;`WJ_Bk ze!w29&g>fxIAV3Ty7y~6o7mmqHLv6xe`JjEiU|9Of}6&TZYZ{yE|z|qpDw36pBw{U za_=oK7^$u6og8sE5E8wfaF|zeZvSZ%iAdoZ-=%q4K%d&7J}yZ!dT2Pg(ekvjf;&1l zd7;v~T3x^|Qn`k)G;ejK=F4MCiRnt6vlcnkW8?!LPk)={y5W?WX0z zK8^6+`2IS~*V!*~2Txp^`Gz4HjBQRkeo=8R)On@!p5}DutKwoQoh-EwQ+3adu)ekT zWw!QZ%$mcA=Iuc8;KrIO# zqYPgiDtr&Ew7n+E2t%O42=#ljqgLx%}r$(}tm~Bd_ zO8XwvPObzR4PmCxc9=&5wsC;vamop7h#l?y<9mg0|x{*x7ZS z8X^H5g?5$b=qu5b^y@vpT{FUBm=i2e^A+SPYnd`8_+}Iw!%r?$F4MY=JLcr^+tx@Q zg@V3fEnLTRbvYcQ3alSun_g^!iuW7M^G04|_;gOmqJ{e#mWq_dvzM72^r2xq`nlNY zuOkjnlf_uNHLP}EK2LL{F4YPs?JqSuhBYG9rCDwz0WSP};c&5Cx~q%FgU*Dc!Xasl zXgdrZ!6qP5C8~U?e|eoWNnj|V@#7me;tqf|f2bzC^TAGmV@TcI1K*k}hB>*LTrUu0 zg4Gj_EK9TqmxoophDqvmOjxsU7~Ca(A%u; zBk8lvo;4fVwnc`FiPY`Z>^eoVNa32PTd7v(lC4dKuY@c6Yr4n^=$z%W3d#KluTV(O zEOOFdo3dsxML!BlW?v-DpP|!zg`8J>zUx#DO$SRP9!-=YHsj!8YKZXik0bflVZmAd zIu9BG@Uv+evIG4zQ;pmO(xBVgDZC{=q)|2K{n31jqAGPEwG6|0#g}FkWe4%t=&Ywh zu$Iq4q|SM3#r0ZbK&##y>`bc}&C>NV*Do=ld7Sm16^MNyf&5f}-xTxmsSotybY_$YdHzblp{7nLU{^}TCi+o9(bCc$6#8~Ms- zqDslpsrAs?t{hg>J5!|2GT05CIH~V{N1q3N0)OwjO^{uf))^K){bwWWtn}!DN&C`@ zA&^0OxoK!N$bhg6(B&sNEJ7~$DSMyngxNdNsX}jap$*XY-TA3>M!&4Dl(cE&+kLqR z_CJ@`o5*YBnpD3gKDw!(X8>$BW;W#VUDe80PYOSU$5)qYApD1%M=(BYHQjstoudR` zF}iPg?LR*DB2e{plm4En z^coF!VRal#%9ih9l`K-c=8@^j+awh9w)W^q+6$d(jUB~5D|2(uf1k(kebWKv{TkH< z%I-lDI>(@gB*pRom=c8w8=M7o;ukNsF5aV=kgy-tJp-2@X!2>ND3K_;kNh3#SXiwm 
z@);CZj&2=PU29Qv3<&y^`K@VUvjOoZFChmr;;LP-GoDsA?&w4kWD~ZU^>cQohYnq(Ed_kBCu8m|@ zRj4hoHp2wThp?Ojh2D#nEVkEHW^k0NR#^*pNp3M+l=!xJ%Yw(fKYkIb@BJs2wSmNX zg%)EzAkjlK*+P$Xt>E_l5beQF@aE>LhS>wWjY+aSKPLAGzomk(%$#)AIL;vZ2}8cJ zfYRPgo}f+f5OcDe{GYtMnh5_Q=L+LnlRo)({U9O!vkWu4*IM-E#NN>vhi)2Lf8t_0 z!@vx@^vGhT=5neXyD$9wd3W;QK8?%tW}|D+{-yrlnj$I$%2$^v=QI&R{m-Uh!7-s5 z0!wH~l9Fq-&_8DPiu&;dW3&5>tVxKsO$o_MLYK-5wCWcPP4|mFReD6{qbRr5lRO#? zz^Vh6mb?=fYcQXcirC!!dHo_o949?6+DI8T&w)>;mvf_13=2lgx)W(GM+>V#wFasRkG6#Mlmh(QX= zKrHu$W+dC{w1gAmYu4NeTF;62D39Yo#`0ttTf%p<6OZenfD4got1W0QcXYxO%F0R~ zYvQZsoBza7@aQ0fP59oM;-v*uGgtI&v)BJt18wxs0hE#MeOCIVQ-41TlI^ll!wQyK zm!u@;OM+XQR?KOCY4Qhb&Co{|*?~cV1#4dXws+aUrCWgeBgbI);P+4d3ch`o7FcZ7 zNZuR2_2mlaN)R=m9AY}IU;I7TF3r1^lV>TK(fpS18w9Gh?~hr{g>uWOf__g=aUJx= zFg*0bh5vfIr>5vXIzJs!-qCj-E(Z$Tpqx1ApT5|&?fohr_>{HsyP$xL@Vk!S9qa^B z|MACz9O%!#e?6!FfG>k{x0KO^assn=(5s~@=*XQ{29wv{BDlw2oA!v1_H z`7&L=YluH6NRVw#M;QnU{N4K${r8QGkNr0#t&>|Gr(AXBL7ezumLGs~sjjztF{7&Y z`|ZAWb%{g37`H12!MB4si(SvE8(2EUSU#CjwgxW8j0e|4KOV71zd72C>N@<-gE7{? zS|H$tg&BK1>Mkq2e`=H1Mmk;dAotFA2Gs|!!T7M5z`(%Bjr{}L|1>{gNz!Xo|C;H> znF&Je(#E8)<77?!ECxix&II@lml`s9l1iUYE^Avjytz8%IjT@TNslx3EHDJjxESG=Rp z0E?*}=g*a#{IN@O*TlElV>zm1kauec?pf7DbbdUOqs$H& z-4)C8o^4e$Q#N%5ijlhPZNv*jn+H=d`z(m2YTo=D|v)w8(5F|4!BL zQ>f>L1>^V70AGQ8O+3mONFD;q)XA{uuSqUw09@u#;DTIGa?*8 zdYyvWXv~;tDw|8(aKw8-)UvAO8GV8dsvPReXIlZ2y(AzLs#iR9&9_NH=MF3V_E3}B z(5sD)5Nkmxk-DJ`VX!Lw(&ziuhYX3Bi=a22Zj%O+8{=vR#fguTW6T~niY7RpjeMmw z4liq&wJi_^Gb=Mq!lHSBqb9>YT#<))XfOYuXV%)LR-{V1WV^0Nn8|D+H!PPZ6)c+J ztSG=M8+gBPc|e-jD|0;1w}g<3paD01XPi02rNqfsGwMiIn=iSKnT&pm0flQ#!H8(- zgZ1_P{Wy#~O~1HpQ&U1>W#z3HDV_18SE*2c3}TFY$iMS3=q%~PD#U*V&cXix!g+Mu ze<{5a|E{BZ!i);%c5mO!O67>p_TFnum{Fv3)06>BOP%|9wv=Ahzba8*lX&7|-@}wK?j0B!8V*`` z1=r@HeI8%SmAc$8wU)|E1M$~jN(xTnpV7p7*_Ajhb^m?Lxlljd+)Scu6z-?uNc-*F zi-*e#)C*qM%l6(%5J3nZXfPnu$6YEt@(41O?Pa`L_yHak#JFS8 zE<^}c`YT56z2K8DiLvp<*z%zJoHtEP{VoAA2&tsq1? 
zc&)5UTOA&s@en}gr^*rJZ*)Rw8(KRvw9<=^qF6{wA8gbEhVWH6hr(87?ZgWQDxY{30PY#3c&K~ zx;n6q&BS!+=dhVAq;$MODQ<_2Ev#oPECgRyQ2N9*{vL)d90Fqw3pi>n28zz)88kZE zZevbUO{{&x@9EN%+kA8>ir~BTB$14vklG0oQ=eaZe|Q>+!`(|3+L2M0u;0xDsY5j@ zR#4-fQ+~tIn8)NbnE;N&Ba$m@W+=fCD-W&8G2i>_Jl`Q*^D0byIhayM_)G(SmY?ZO z$arX`MpMhJU}z*7u)8vdM(uG{&=l^Y$$MpOcdqfz%)|C|YmEY0j-cevAdcf>D;Y_k zDx?|~$h2>dCZpA5+y_KZTbN;Wmf_Kysqk2w=%ttGf`IYrvTFqb2r=|?60N>?yGdjQ z6HtH-Y`=uAntO*4qoG4|pTL`5!7}S}Tm=B)iYqe2W$Jr}pbJ#A-z$|@|@?9>~qaK9kH#>=2qhHK2WQy}*Q2a1mO0ZIA>>b?f92xuu@igw*0yH_Jdi#g_ zdc8w!laM`q;Ro|-rhJ$Bmd_ec_sA^?J+a`<(R~&<$#xj#0eS`q_Q;=}Y`_&+SQqGv zWeDOSEGGsn(7F)&C+lJ@Oh_nde;{SqW1yGDfvN7|W0oZngKQ?Oumiy8DI{EwpymgZ zu;)m@a}5MUDPXgHxnS&^Vql4htk0ll4wAjZy?&ELd1%Mw9AQpw360Nz!5h87m+o??rap1!Op=`vT7WLSfYK zm4EC^FX{E;R==hE^oP`D5cXhLhtYMHe!ZI!?5KF?(vMfwoe16J z*6WrAcQ-(KrQbGGnKlnaEAF6nPsHQu z`=pI@;pF*J5iGaM#Q+_BtI_W73F}`odzv(s*AxWsA{pW&jmS)!G(iBoS?6`M(voJ= zT>GQ;xoIWp7L(RS45U2UfgTW1s~aW~4pZOo#)Oh&0T|ojL3HU6>i)z>KB6nVw1j1^ z^)~<75nGgzsqq8bi4ka~@1_`_2TgpvUf$OzZ+O)*J?scSW#aop*3)YBrd33Ivx0A; z!{^Ub@t7khVTeN5^hAsKPuSCGa?BKAD1f?q6xIEMNCU(px^AgUxsOCN2GFyti&lSB zV*pOSxiHNhjksAmot%uBNtI6hPQc?r^*NN^V$_P$+|@snXaowi&2#L&o35Ji^;X@; zg?=>>dIgl0Is6EerMP;OU9U*2a0!yLX*#k^D6`2gIp^a}D{=q4sMQ*u$g}y-3wSB- z$Aw}RJBVvovo+9@4~ZIe*N+G^x<{GM$&GZh-_(DvFW~HG*p;1UC=(H@k+F4b)2YXD zt#VFU(n`9rW|*ERpT0w-&d_{pnY5yS5bQ@#_<*?Wu?5`t-WE_dV1nD(%Ldtq6h8y= z#3-9|J92de5FB-}?qjMrj`R)olj9BN0pAp|83Ep=VYT$o!j)?T#Ji5(dlarj@0k2x zR$vgzzlXT!i=hScb_KWr8fL=yVnyf90A;98grpZf_xT8yL$sXPY6tuEKgdHtzfS9O z%Sk<0KEU-njuvo0{8A;?aUiYE>xvh*BI=Qy!dwzny|Y}~jU(?<=nd%C!EchZc0t@N zfiz7-I%pN2x}@j>=>-}zDHPM7@>ccu9+66^(>wQ@40_>5^=Shy9J9vX+QqIq7}4td z&cc_QAU6*#=zUvIvz4~*F6nhHQM>gRwz$z{_$(h2rbaRj{zo$;Ti4f39XyLIhyNvm zlCec~N&(Ijf6SXDQl1i8a6Yy3;hUFe!tK7h2B|c;pHRT*wCq2OWSH;Z8o2$A6X#Kg z0i)M*ikI>5Q)J++D0;_wl!Idfm5g1aK>v*~OgTq}*PCo%bP2*~s;-=#vI}V?09IWX zIJ?Ho-T3<*)VB|g3mJR-OdLS1H#+ekw;?7%zaY$x>gUJTAscS-4Q zrYC}8;tH!<$uG8Mk7D1s40zikHHN$xi!2O?7G7M|c}r&?r`7Z|ZN&C~kTP4f`?m_? 
z3?Od9&zdduh)+8(!7Z)AyU+*^T$U{%EGt39_O+UVYBkw#MmEzB zn-Y=7oPAM|b=HU{zUYyl^K<`{ z0|f0_T1#|qm7R6%K-Z;iXc4OsX6PmOQbR^HkNrEqhf+439F=JFwd3#lEF`TYzlQ}} zTGjXy-7T(0IMLnlUdQEyfZ7;zV2Y{R?}YnPh%-s&k36U9hVh3R19E}In(EB#Z~f(u zGA+o=p2x(Tx0(|M7fzEG;ZTpkWUHv?{Ui{mKNC3OJIks@px&Yi-uhXY(xiswf)}Wn z#`R3pQMJWD|K-~27FJSDSnp+<0j_tPC<@%TDI@J+%lHhwyUTbkCc^z3K3icF?%tJBv*+P0 zj{gZl?L~=Hbf}CyO+)O1Or9PUeRJuJa88(7@x!F)D?LN9CfzDKDTEjs^YYCW7+vC8 z;_@d5)t$HT2GPGFS<9yisNRpTy3!=Fa3vV9_skA1K@#~NZuPu1P~6J9)Hs1psn)13 zGI|=Xtg@u&ZmuUo_Z-*CzKGSL(in0ka2pELq(4Novi)T(-N6TP{8>`j4g+IC}vu$nB zqN7#&0r}}Mr>ArKZ%`kOE0xu*9~sBFY-^qDeIYiOMF*4qWKYXx$>8LZG+)1+MRy z5Kz=DDZ*Shdh2)_u(%H?GM>}Bv=pa(uJh32x%y}sVQsM@>^1;mrHFPGq;j)PBjS9| z=aTMd(5tQZwXz#hgprP5Nqe=b+VxTi$Am-+`bAt2)C9Ep9NS*zP;bI)B*ks-?@a@u z?8s!eMpdj;_A^kXDl6whIXQrRbo~bxKRBQn(Hq8WH|A~N>MK;bAUoojZdIP+Bbl6n zPZSPj@zxVUq%evr5UmpDA5PJHcP?YvhkP?=$9|QzS~dw!eNvD;@;J8EjvAU33)q5T7xcXBI>LCy-?Y){Zs1>?=8S3D4t9NyM z-H*6YYax5VF8OFF)v{R#^I#?bTI7&iZ$DY1 zcmH~HCoOxJRWcjc!XV6^tKCAjdldxrdtNgQti*(~Xu`eEz@nPJNtf`j;UltKmGRHnIc+m&ptFEEkrQ1=)ng{{Vw1IwV>9osTn-@;2qpofy`?2f zZ?0iuV`B~3p4~jvRPr_)t+>Ivh9+({N_cK2h{Y;CnSM6vy_l#wvYNZLK3S`7Sv37d z&Kj=}<6@kiWZ&4>Xkj{#AyrYLMdqI#9kr};y1lC59nG(0_&QrPOYN>yR}eL=u&{84 zX?e6XK@t5ca;#<|hXb|EJ9EWh%BI@a%FuikJHW<`z-@xQx6;ZaBm?`LG~@$7cXxMp z+%6H1rkrf z=IYs?SX#b<6@6l4$*$&9<3`OwCmbgrR5KkzpbF+T=r*es%0k&WI_l!3W9SSxCg?dQ=DarXY{nQrO8N_Jx{ z6nyvBpCh-lNUvWBa#OPzez`p{xCv}blxocc%AXi(co%8|%MXS<9tzT;(3=keOTu~D zM)&2vkHjaxWKxa&Bp}I%Z%G&Q?XNH?OHM!IL1V0#3S{_hd#ryO939Ooof#i5>>oLx z(J1*Kt^T}ieoJr+w7a`IGB(y@Gd(>`Fr>|`akRI`l=z-$9Rl38kT~WAK<*aNvL?WW z125P>68e71v8d7|ELd^=L$tKG*N-e`2b&6VCrf}`n(y5m@$I4G-9M%DxkH@4fBJKS z`mzwvVi7065U-3DytMPUyEGbyK50q`^emDng$1@ZQ^-U>n-n@+QAGcBa{2Gr`|(67 zU`dn+3#YURh?8r7M3^T}{ccv&8T`K_Be6m`O=;b8C buildDeleteRequest( @@ -119,6 +123,9 @@ public void testMultiObjectDeleteNoPermissions() throws Throwable { Path csvPath = maybeGetCsvPath(); S3AFileSystem fs = (S3AFileSystem) csvPath.getFileSystem( getConfiguration()); + // create a span, expect it to be activated. 
+ fs.getAuditSpanSource().createSpan(StoreStatisticNames.OP_DELETE, + csvPath.toString(), null); List keys = buildDeleteRequest( new String[]{ @@ -163,7 +170,9 @@ public void testMultiObjectDeleteMissingEntriesSucceeds() throws Throwable { S3AFileSystem fs = getFileSystem(); List keys = keysToDelete( Lists.newArrayList(new Path(base, "1"), new Path(base, "2"))); - fs.removeKeys(keys, false, null); + try (AuditSpan span = span()) { + fs.removeKeys(keys, false, null); + } } private String join(final Iterable iterable) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java index e6ebfba922d5f..496226caf3da1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.fs.store.EtagChecksum; import org.apache.hadoop.test.LambdaTestUtils; @@ -111,16 +112,18 @@ public void testCreateNonRecursiveParentIsFile() throws IOException { @Test public void testPutObjectDirect() throws Throwable { final S3AFileSystem fs = getFileSystem(); - ObjectMetadata metadata = fs.newObjectMetadata(-1); - metadata.setContentLength(-1); - Path path = path("putDirect"); - final PutObjectRequest put = new PutObjectRequest(fs.getBucket(), - path.toUri().getPath(), - new ByteArrayInputStream("PUT".getBytes()), - metadata); - LambdaTestUtils.intercept(IllegalStateException.class, - () -> fs.putObjectDirect(put)); - assertPathDoesNotExist("put object was created", path); + try (AuditSpan span = span()) { + ObjectMetadata metadata = fs.newObjectMetadata(-1); + metadata.setContentLength(-1); + Path path = path("putDirect"); + final PutObjectRequest put = new PutObjectRequest(fs.getBucket(), + path.toUri().getPath(), + new ByteArrayInputStream("PUT".getBytes()), + metadata); + LambdaTestUtils.intercept(IllegalStateException.class, + () -> fs.putObjectDirect(put)); + assertPathDoesNotExist("put object was created", path); + } } private FSDataOutputStream createNonRecursive(Path path) throws IOException { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java index 4746ad5588bd0..818d2fc889c17 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java @@ -21,6 +21,8 @@ import com.amazonaws.services.s3.model.MultipartUpload; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.store.audit.AuditSpan; + import org.junit.Test; import java.io.IOException; @@ -57,7 +59,7 @@ protected Configuration createConfiguration() { public void testListMultipartUploads() throws Exception { S3AFileSystem fs = getFileSystem(); Set keySet = new HashSet<>(); - try { + try (AuditSpan span = span()) { // 1. 
Create NUM_KEYS pending upload parts for (int i = 0; i < NUM_KEYS; i++) { Path filePath = getPartFilename(i); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java index adcf578b05862..770a99599f508 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java @@ -1352,7 +1352,7 @@ private void skipIfVersionPolicyAndNoVersionId(Path testpath) // enabled Assume.assumeTrue( "Target filesystem does not support versioning", - fs.getObjectMetadata(fs.pathToKey(testpath)).getVersionId() != null); + fs.getObjectMetadata(testpath).getVersionId() != null); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java index bd69ef28669bb..d603d38432285 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java @@ -20,14 +20,12 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.util.stream.Stream; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.ListObjectsV2Request; import com.amazonaws.services.s3.model.ListObjectsV2Result; import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.S3ObjectSummary; import org.assertj.core.api.Assertions; import org.junit.Test; @@ -40,9 +38,11 @@ import org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore; +import org.apache.hadoop.fs.store.audit.AuditSpan; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_MKDIRS; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; import static org.apache.hadoop.fs.s3a.S3ATestUtils.assumeFilesystemHasMetadatastore; @@ -73,7 +73,9 @@ public void testRenameEmptyDir() throws Throwable { String destDirMarker = fs.pathToKey(destDir) + "/"; // set things up. 
mkdirs(sourceDir); - // there's source directory marker + // create a span for all the low level operations + span(); + // there's source directory marker+ fs.getObjectMetadata(sourceDirMarker); S3AFileStatus srcStatus = getEmptyDirStatus(sourceDir); assertEquals("Must be an empty dir: " + srcStatus, Tristate.TRUE, @@ -89,6 +91,7 @@ public void testRenameEmptyDir() throws Throwable { // and verify that there's no dir marker hidden under a tombstone intercept(FileNotFoundException.class, () -> Invoker.once("HEAD", sourceDirMarker, () -> { + span(); ObjectMetadata md = fs.getObjectMetadata(sourceDirMarker); return String.format("Object %s of length %d", sourceDirMarker, md.getInstanceLength()); @@ -99,11 +102,14 @@ public void testRenameEmptyDir() throws Throwable { assertEquals("Must not be an empty dir: " + baseStatus, Tristate.FALSE, baseStatus.isEmptyDirectory()); // and verify the dest dir has a marker + span(); fs.getObjectMetadata(destDirMarker); } private S3AFileStatus getEmptyDirStatus(Path dir) throws IOException { - return getFileSystem().innerGetFileStatus(dir, true, StatusProbeEnum.ALL); + try (AuditSpan span = span()) { + return getFileSystem().innerGetFileStatus(dir, true, StatusProbeEnum.ALL); + } } @Test @@ -128,7 +134,7 @@ public void testEmptyDirs() throws Exception { fs.setMetadataStore(configuredMs); // "start cluster" Path newFile = path("existing-dir/new-file"); touch(fs, newFile); - + span(); S3AFileStatus status = fs.innerGetFileStatus(existingDir, true, StatusProbeEnum.ALL); assertEquals("Should not be empty dir", Tristate.FALSE, @@ -137,6 +143,7 @@ public void testEmptyDirs() throws Exception { // 3. Assert that removing the only file the MetadataStore witnessed // being created doesn't cause it to think the directory is now empty. fs.delete(newFile, false); + span(); status = fs.innerGetFileStatus(existingDir, true, StatusProbeEnum.ALL); assertEquals("Should not be empty dir", Tristate.FALSE, status.isEmptyDirectory()); @@ -144,6 +151,7 @@ public void testEmptyDirs() throws Exception { // 4. Assert that removing the final file, that existed "before" // MetadataStore started, *does* cause the directory to be marked empty. fs.delete(existingFile, false); + span(); status = fs.innerGetFileStatus(existingDir, true, StatusProbeEnum.ALL); assertEquals("Should be empty dir now", Tristate.TRUE, status.isEmptyDirectory()); @@ -198,11 +206,9 @@ public void testTombstonesAndEmptyDirectories() throws Throwable { createEmptyObject(fs, childKey); // Do a list - ListObjectsV2Request listReq = new ListObjectsV2Request() - .withBucketName(bucket) - .withPrefix(baseKey) - .withMaxKeys(10) - .withDelimiter("/"); + span(); + ListObjectsV2Request listReq = ctx.getRequestFactory() + .newListObjectsV2Request(baseKey, "/", 10); ListObjectsV2Result listing = s3.listObjectsV2(listReq); // the listing has the first path as a prefix, because of the child @@ -243,6 +249,7 @@ public void testTombstonesAndEmptyDirectories() throws Throwable { } finally { // try to recover from the defective state. 
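The test hunks above repeat one idiom: wrap a bare store-level call in an audit span obtained from the filesystem, so the auditor does not reject it as an unaudited operation. A minimal sketch of that idiom, using only calls that appear in this patch; fs, path and the choice of OP_DELETE are placeholders:

// inside a test case; createSpan() may throw IOException
try (AuditSpan span = fs.getAuditSpanSource()
    .createSpan(StoreStatisticNames.OP_DELETE, path.toString(), null)) {
  // while the span is active, low-level calls such as getObjectMetadata()
  // are attributed to it rather than failing the out-of-span check
  fs.getObjectMetadata(path);
}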
+ span(); s3.deleteObject(bucket, childKey); fs.delete(lastPath, true); ddbMs.forgetMetadata(firstPath); @@ -272,19 +279,13 @@ private DynamoDBMetadataStore getRequiredDDBMetastore(S3AFileSystem fs) { * @param fs filesystem * @param key key */ - private void createEmptyObject(S3AFileSystem fs, String key) { - final InputStream im = new InputStream() { - @Override - public int read() { - return -1; - } - }; + private void createEmptyObject(S3AFileSystem fs, String key) + throws IOException { - PutObjectRequest putObjectRequest = fs.newPutObjectRequest(key, - fs.newObjectMetadata(0L), - im); - AmazonS3 s3 = fs.getAmazonS3ClientForTesting("PUT"); - s3.putObject(putObjectRequest); + try (AuditSpan span = fs.getAuditSpanSource() + .createSpan(INVOCATION_MKDIRS.getSymbol(), key, null)) { + fs.createMkdirOperationCallbacks().createFakeDirectory(key); + } } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java index e2915884cefa3..624bfd6c94501 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java @@ -22,9 +22,8 @@ import java.net.URI; import com.amazonaws.AmazonClientException; +import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.amazonaws.services.s3.model.UploadPartRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,14 +36,18 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.AuditTestSupport; import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase; +import org.apache.hadoop.fs.s3a.impl.RequestFactoryImpl; import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.statistics.DurationTrackerFactory; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.noopAuditor; import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTrackerFactory; import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; @@ -81,6 +84,16 @@ public class MockS3AFileSystem extends S3AFileSystem { private final Path root; + /** + * This is a request factory whose preparation is a no-op. + */ + public static final RequestFactory REQUEST_FACTORY = + RequestFactoryImpl.builder() + .withRequestPreparer(MockS3AFileSystem::prepareRequest) + .withBucket(BUCKET) + .withEncryptionSecrets(new EncryptionSecrets()) + .build(); + /** * This can be edited to set the log level of events through the * mock FS. 
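MockS3AFileSystem above publishes a shared request factory whose request preparation step is a no-op. A test that needs its own factory can build one the same way; the bucket name and object key below are placeholders, and every call is one shown elsewhere in this patch:

// sketch: a standalone request factory for unit tests
RequestFactory factory = RequestFactoryImpl.builder()
    .withBucket("test-bucket")
    .withEncryptionSecrets(new EncryptionSecrets())
    .build();
// requests built this way carry no audit span until a handler prepares them
GetObjectMetadataRequest head =
    factory.newGetObjectMetadataRequest("dir/object");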
@@ -99,6 +112,15 @@ public MockS3AFileSystem(S3AFileSystem mock, root = new Path(FS_URI.toString()); } + private static T prepareRequest(T t) { + return t; + } + + @Override + public RequestFactory getRequestFactory() { + return REQUEST_FACTORY; + } + public Pair getOutcome() { return outcome; @@ -147,8 +169,11 @@ public Path qualify(final Path path) { public void initialize(URI name, Configuration originalConf) throws IOException { conf = originalConf; - writeHelper = new WriteOperationHelper(this, conf, - new EmptyS3AStatisticsContext()); + writeHelper = new WriteOperationHelper(this, + conf, + new EmptyS3AStatisticsContext(), + noopAuditor(conf), + AuditTestSupport.NOOP_SPAN); } @Override @@ -305,17 +330,6 @@ public void incrementPutCompletedStatistics(boolean success, long bytes) { public void incrementPutProgressStatistics(String key, long bytes) { } - @Override - protected void setOptionalMultipartUploadRequestParameters( - InitiateMultipartUploadRequest req) { -// no-op - } - - @Override - protected void setOptionalUploadPartRequestParameters( - UploadPartRequest request) { - } - @Override @SuppressWarnings("deprecation") public long getDefaultBlockSize() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java index 861824277aca9..04c2b2a09bda2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java @@ -22,6 +22,9 @@ import com.amazonaws.services.s3.model.PartETag; import com.amazonaws.services.s3.model.UploadPartRequest; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.io.IOUtils; + import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,7 +57,9 @@ private MultipartTestUtils() { } static void cleanupParts(S3AFileSystem fs, Set keySet) { boolean anyFailure = false; for (IdKey ik : keySet) { - try { + try (AuditSpan span = + fs.createSpan("multipart", ik.key, null)) { + LOG.debug("aborting upload id {}", ik.getUploadId()); fs.abortMultipartUpload(ik.getKey(), ik.getUploadId()); } catch (Exception e) { @@ -69,31 +74,39 @@ static void cleanupParts(S3AFileSystem fs, Set keySet) { public static IdKey createPartUpload(S3AFileSystem fs, String key, int len, int partNo) throws IOException { - WriteOperationHelper writeHelper = fs.getWriteOperationHelper(); - byte[] data = dataset(len, 'a', 'z'); - InputStream in = new ByteArrayInputStream(data); - String uploadId = writeHelper.initiateMultiPartUpload(key); - UploadPartRequest req = writeHelper.newUploadPartRequest(key, uploadId, - partNo, len, in, null, 0L); - PartETag partEtag = fs.uploadPart(req).getPartETag(); - LOG.debug("uploaded part etag {}, upid {}", partEtag.getETag(), uploadId); - return new IdKey(key, uploadId); + try (AuditSpan span = fs.createSpan("multipart", key, null)) { + WriteOperationHelper writeHelper = fs.getWriteOperationHelper(); + byte[] data = dataset(len, 'a', 'z'); + InputStream in = new ByteArrayInputStream(data); + String uploadId = writeHelper.initiateMultiPartUpload(key); + UploadPartRequest req = writeHelper.newUploadPartRequest(key, uploadId, + partNo, len, in, null, 0L); + PartETag partEtag = writeHelper.uploadPart(req).getPartETag(); + LOG.debug("uploaded part etag {}, upid {}", partEtag.getETag(), uploadId); + return new IdKey(key, uploadId); + 
} } /** Delete any uploads under given path (recursive). Silent on failure. */ public static void clearAnyUploads(S3AFileSystem fs, Path path) { + String key = fs.pathToKey(path); + AuditSpan span = null; try { - String key = fs.pathToKey(path); MultipartUtils.UploadIterator uploads = fs.listUploads(key); + span = fs.createSpan("multipart", path.toString(), null); + final WriteOperationHelper helper + = fs.getWriteOperationHelper(); while (uploads.hasNext()) { MultipartUpload upload = uploads.next(); - fs.getWriteOperationHelper().abortMultipartUpload(upload.getKey(), - upload.getUploadId(), true, LOG_EVENT); LOG.debug("Cleaning up upload: {} {}", upload.getKey(), truncatedUploadId(upload.getUploadId())); + helper.abortMultipartUpload(upload.getKey(), + upload.getUploadId(), true, LOG_EVENT); } } catch (IOException ioe) { LOG.info("Ignoring exception: ", ioe); + } finally { + IOUtils.closeStream(span); } } @@ -131,13 +144,15 @@ public static int countUploadsAt(S3AFileSystem fs, Path path) throws public static List listMultipartUploads(S3AFileSystem fs, String prefix) throws IOException { - return fs - .listMultipartUploads(prefix).stream() - .map(upload -> String.format("Upload to %s with ID %s; initiated %s", - upload.getKey(), - upload.getUploadId(), - S3ATestUtils.LISTING_FORMAT.format(upload.getInitiated()))) - .collect(Collectors.toList()); + try (AuditSpan span = fs.createSpan("multipart", prefix, null)) { + return fs + .listMultipartUploads(prefix).stream() + .map(upload -> String.format("Upload to %s with ID %s; initiated %s", + upload.getKey(), + upload.getUploadId(), + S3ATestUtils.LISTING_FORMAT.format(upload.getInitiated()))) + .collect(Collectors.toList()); + } } @@ -146,7 +161,7 @@ private static String truncatedUploadId(String fullId) { } /** Struct of object key, upload ID. */ - static class IdKey { + public static class IdKey { private String key; private String uploadId; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index 599f18cd9ad62..6a920972bab78 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -67,7 +67,6 @@ import java.io.Closeable; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -91,7 +90,6 @@ import static org.apache.hadoop.fs.s3a.S3ATestConstants.*; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3AUtils.propagateBucketOptions; -import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.*; @@ -853,21 +851,6 @@ public static void callQuietly(final Logger log, } } - /** - * Call a void operation; any exception raised is logged at info. - * This is for test teardowns. - * @param log log to use. - * @param operation operation to invoke - */ - public static void callQuietly(final Logger log, - final Invoker.VoidOperation operation) { - try { - operation.execute(); - } catch (Exception e) { - log.info(e.toString(), e); - } - } - /** * Deploy a hadoop service: init and start it. 
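In clearAnyUploads() above the span has to stay open while a lazy listing is iterated, so try-with-resources does not fit; the span is created explicitly and closed in a finally clause. The shape of that pattern, with fs and path as placeholders:

AuditSpan span = null;
try {
  span = fs.createSpan("multipart", path.toString(), null);
  // ... iterate the listing and issue requests while the span is active ...
} finally {
  // closeStream() tolerates null, so this is safe even if createSpan()
  // was never reached
  IOUtils.closeStream(span);
}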
* @param conf configuration to use @@ -1449,35 +1432,6 @@ public static void checkListingContainsPath(S3AFileSystem fs, Path filePath) listStatusHasIt); } - /** - * Wait for a deleted file to no longer be visible. - * @param fs filesystem - * @param testFilePath path to query - * @throws Exception failure - */ - public static void awaitDeletedFileDisappearance(final S3AFileSystem fs, - final Path testFilePath) throws Exception { - eventually( - STABILIZATION_TIME, PROBE_INTERVAL_MILLIS, - () -> intercept(FileNotFoundException.class, - () -> fs.getFileStatus(testFilePath))); - } - - /** - * Wait for a file to be visible. - * @param fs filesystem - * @param testFilePath path to query - * @return the file status. - * @throws Exception failure - */ - public static S3AFileStatus awaitFileStatus(S3AFileSystem fs, - final Path testFilePath) - throws Exception { - return (S3AFileStatus) eventually( - STABILIZATION_TIME, PROBE_INTERVAL_MILLIS, - () -> fs.getFileStatus(testFilePath)); - } - /** * This creates a set containing all current threads and some well-known * thread names whose existence should not fail test runs. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java index de27411a41ab8..9ae24c19f3da5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.audit.AuditTestSupport; import org.apache.hadoop.fs.s3a.commit.PutTracker; import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.util.Progressable; @@ -30,6 +31,7 @@ import java.io.IOException; import java.util.concurrent.ExecutorService; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.noopAuditor; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -86,9 +88,14 @@ public void testFlushNoOpWhenStreamClosed() throws Exception { public void testWriteOperationHelperPartLimits() throws Throwable { S3AFileSystem s3a = mock(S3AFileSystem.class); when(s3a.getBucket()).thenReturn("bucket"); + when(s3a.getRequestFactory()) + .thenReturn(MockS3AFileSystem.REQUEST_FACTORY); + final Configuration conf = new Configuration(); WriteOperationHelper woh = new WriteOperationHelper(s3a, - new Configuration(), - new EmptyS3AStatisticsContext()); + conf, + new EmptyS3AStatisticsContext(), + noopAuditor(conf), + AuditTestSupport.NOOP_SPAN); ByteArrayInputStream inputStream = new ByteArrayInputStream( "a".getBytes()); // first one works diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AbstractAuditingTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AbstractAuditingTest.java new file mode 100644 index 0000000000000..c76e3fa968f92 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AbstractAuditingTest.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
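The WriteOperationHelper constructor now takes an auditor and an initial span, and the test changes above pass the no-op implementations. A condensed sketch of that construction; fs and conf stand for a real filesystem and configuration:

WriteOperationHelper helper = new WriteOperationHelper(fs,
    conf,
    new EmptyS3AStatisticsContext(),
    noopAuditor(conf),             // static import from AuditTestSupport
    AuditTestSupport.NOOP_SPAN);   // reusable no-op span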
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.IOException; +import java.util.Map; + +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import org.junit.After; +import org.junit.Before; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.impl.RequestFactoryImpl; +import org.apache.hadoop.fs.statistics.IOStatisticAssertions; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_GET_FILE_STATUS; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.UNAUDITED_OPERATION; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.createIOStatisticsStoreForAuditing; +import static org.apache.hadoop.service.ServiceOperations.stopQuietly; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Abstract class for auditor unit tests. + */ +public abstract class AbstractAuditingTest extends AbstractHadoopTestBase { + + protected static final String OPERATION + = INVOCATION_GET_FILE_STATUS.getSymbol(); + + /** + * Logging. + */ + private static final Logger LOG = + LoggerFactory.getLogger(AbstractAuditingTest.class); + + public static final String PATH_1 = "/path1"; + + public static final String PATH_2 = "/path2"; + + /** + * Statistics store with the auditor counters wired up. + */ + private final IOStatisticsStore ioStatistics = + createIOStatisticsStoreForAuditing(); + + private RequestFactory requestFactory; + + private AuditManagerS3A manager; + + @Before + public void setup() throws Exception { + requestFactory = RequestFactoryImpl.builder() + .withBucket("bucket") + .build(); + manager = AuditIntegration.createAndStartAuditManager( + createConfig(), + ioStatistics); + } + + /** + * Create config. + * @return config to use when creating a manager + */ + protected abstract Configuration createConfig(); + + @After + public void teardown() { + stopQuietly(manager); + } + + protected IOStatisticsStore getIOStatistics() { + return ioStatistics; + } + + protected RequestFactory getRequestFactory() { + return requestFactory; + } + + protected AuditManagerS3A getManager() { + return manager; + } + + /** + * Assert that a specific span is active. + * This matches on the wrapped spans. + * @param span span to assert over. + */ + protected void assertActiveSpan(final AuditSpan span) { + assertThat(activeSpan()) + .isSameAs(span); + } + + /** + * Assert a span is unbound/invalid. + * @param span span to assert over. 
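AbstractAuditingTest wires the same pieces together for every auditor unit test: a statistics store with the audit counters, an audit manager, a span, and requests pushed through the manager's beforeExecution() callback. A condensed sketch of that flow, assuming the static imports from AuditTestSupport and Statistic used in these tests and a test method that declares throws Exception:

IOStatisticsStore stats = createIOStatisticsStoreForAuditing();
AuditManagerS3A manager =
    AuditIntegration.createAndStartAuditManager(noopAuditConfig(), stats);
RequestFactory factory = RequestFactoryImpl.builder()
    .withBucket("bucket")
    .build();
// create (and implicitly activate) a span, then audit a HEAD request
AuditSpanS3A span = manager.createSpan(
    INVOCATION_GET_FILE_STATUS.getSymbol(), "/path1", "/path2");
GetObjectMetadataRequest head = manager.beforeExecution(
    factory.newGetObjectMetadataRequest("/path1"));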
+ */ + protected void assertUnbondedSpan(final AuditSpan span) { + assertThat(span.isValidSpan()) + .describedAs("Validity of %s", span) + .isFalse(); + } + + protected AuditSpanS3A activeSpan() { + return manager.getActiveAuditSpan(); + } + + /** + * Create a head request and pass it through the manager's beforeExecution() + * callback. + * @return a processed request. + */ + protected GetObjectMetadataRequest head() { + return manager.beforeExecution( + requestFactory.newGetObjectMetadataRequest("/")); + } + + /** + * Assert a head request fails as there is no + * active span. + */ + protected void assertHeadUnaudited() throws Exception { + intercept(AuditFailureException.class, + UNAUDITED_OPERATION, this::head); + } + + /** + * Assert that the audit failure is of a given value. + * Returns the value to assist in chaining, + * @param expected expected value + * @return the expected value. + */ + protected long verifyAuditFailureCount( + final long expected) { + return verifyCounter(Statistic.AUDIT_FAILURE, expected); + } + + /** + * Assert that the audit execution count + * is of a given value. + * Returns the value to assist in chaining, + * @param expected expected value + * @return the expected value. + */ + protected long verifyAuditExecutionCount( + final long expected) { + return verifyCounter(Statistic.AUDIT_REQUEST_EXECUTION, expected); + } + + /** + * Assert that a statistic counter is of a given value. + * Returns the value to assist in chaining, + * @param statistic statistic to check + * @param expected expected value + * @return the expected value. + */ + protected long verifyCounter(final Statistic statistic, + final long expected) { + IOStatisticAssertions.assertThatStatisticCounter( + ioStatistics, + statistic.getSymbol()) + .isEqualTo(expected); + return expected; + } + + /** + * Create and switch to a span. + * @return a span + */ + protected AuditSpanS3A span() throws IOException { + AuditSpanS3A span = manager.createSpan(OPERATION, PATH_1, PATH_2); + assertThat(span) + .matches(AuditSpan::isValidSpan); + return span; + } + + /** + * Assert the map contains the expected (key, value). + * @param params map of params + * @param key key + * @param expected expected value. + */ + protected void assertMapContains(final Map params, + final String key, final String expected) { + assertThat(params.get(key)) + .describedAs(key) + .isEqualTo(expected); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AccessCheckingAuditor.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AccessCheckingAuditor.java new file mode 100644 index 0000000000000..e8505016c0b31 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AccessCheckingAuditor.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.audit.impl.NoopAuditor; + +/** + * Noop auditor which lets access checks be enabled/disabled. + */ +public class AccessCheckingAuditor extends NoopAuditor { + + public static final String CLASS = + "org.apache.hadoop.fs.s3a.audit.AccessCheckingAuditor"; + + /** Flag to enable/disable access. */ + private boolean accessAllowed = true; + + public AccessCheckingAuditor() { + } + + public void setAccessAllowed(final boolean accessAllowed) { + this.accessAllowed = accessAllowed; + } + + @Override + public boolean checkAccess(final Path path, + final S3AFileStatus status, + final FsAction mode) + throws IOException { + return accessAllowed; + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AuditTestSupport.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AuditTestSupport.java new file mode 100644 index 0000000000000..9519c394ce23d --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/AuditTestSupport.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.audit.impl.NoopAuditManagerS3A; +import org.apache.hadoop.fs.s3a.audit.impl.NoopAuditor; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.fs.store.audit.AuditSpan; + +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_ACCESS_CHECK_FAILURE; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_FAILURE; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_REQUEST_EXECUTION; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_SPAN_CREATION; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_REQUEST_HANDLERS; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_SERVICE_CLASSNAME; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.LOGGING_AUDIT_SERVICE; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.NOOP_AUDIT_SERVICE; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REFERRER_HEADER_ENABLED; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REJECT_OUT_OF_SPAN_OPERATIONS; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * Support for auditing in testing. 
+ */ +public final class AuditTestSupport { + + private AuditTestSupport() { + } + + /** + * Reusable no-op span instance. + */ + public static final AuditSpan NOOP_SPAN = NoopAuditManagerS3A + .createNewSpan("noop", null, null); + + /** + * Create, init and start a no-op auditor instance. + * @param conf configuration. + * @return a started instance. + */ + public static OperationAuditor noopAuditor(Configuration conf) { + return NoopAuditor.createAndStartNoopAuditor(conf, null); + } + + /** + * Create config for no-op auditor. + * @return config with nothing but the no-op audit service set up. + */ + public static Configuration noopAuditConfig() { + final Configuration conf = new Configuration(false); + conf.set( + AUDIT_SERVICE_CLASSNAME, NOOP_AUDIT_SERVICE); + return conf; + } + + /** + * Create config for logging auditor which + * rejects out of span operations. + * @return config + */ + public static Configuration loggingAuditConfig() { + return enableLoggingAuditor(new Configuration(false)); + } + + /** + * Patch the configuration to support the logging auditor and + * rejects out of span operations. + * @param conf config to patch. + * @return the config + */ + public static Configuration enableLoggingAuditor(final Configuration conf) { + conf.set(AUDIT_SERVICE_CLASSNAME, LOGGING_AUDIT_SERVICE); + conf.setBoolean(REJECT_OUT_OF_SPAN_OPERATIONS, true); + return conf; + } + + /** + * Create IOStatistics store with the auditor counters wired up. + * @return an IOStatistics store to pass to audit managers. + */ + public static IOStatisticsStore createIOStatisticsStoreForAuditing() { + return iostatisticsStore() + .withCounters( + AUDIT_ACCESS_CHECK_FAILURE.getSymbol(), + AUDIT_FAILURE.getSymbol(), + AUDIT_REQUEST_EXECUTION.getSymbol(), + AUDIT_SPAN_CREATION.getSymbol()) + .build(); + } + + /** + * Remove all overridden values for + * the test bucket/global in the given config. + * @param conf configuration to patch + * @return the configuration. + */ + public static Configuration resetAuditOptions(Configuration conf) { + S3ATestUtils.removeBaseAndBucketOverrides(conf, + REFERRER_HEADER_ENABLED, + REJECT_OUT_OF_SPAN_OPERATIONS, + AUDIT_REQUEST_HANDLERS, + AUDIT_SERVICE_CLASSNAME); + return conf; + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java new file mode 100644 index 0000000000000..bd145fa9c7403 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
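The helpers above are how the integration tests later in this patch opt in to a particular auditor. For example, a test configuration that uses the logging auditor and fails fast on unaudited requests follows this pattern (a sketch of what the tests below do in createConfiguration()):

Configuration conf = new Configuration();
// clear any base or per-bucket audit overrides first
resetAuditOptions(conf);
// switch to the logging auditor and reject out-of-span operations
enableLoggingAuditor(conf);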
+ */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.FileNotFoundException; +import java.io.IOException; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest; +import org.apache.hadoop.security.AccessControlException; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_ACCESS_CHECK_FAILURE; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_REQUEST_EXECUTION; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_ACCESS; +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_REQUEST; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_SERVICE_CLASSNAME; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.resetAuditOptions; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILE_STATUS_ALL_PROBES; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILE_STATUS_FILE_PROBE; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.ROOT_FILE_STATUS_PROBE; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; + +/** + * Test S3A FS Access permit/deny is passed through all the way to the + * auditor. + * Uses {@link AccessCheckingAuditor} to enable/disable access. + * There are not currently any contract tests for this; behaviour + * based on base FileSystem implementation. + */ +public class ITestAuditAccessChecks extends AbstractS3ACostTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestAuditAccessChecks.class); + + private AccessCheckingAuditor auditor; + + public ITestAuditAccessChecks() { + super(true); + } + + @Override + public Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + resetAuditOptions(conf); + conf.set(AUDIT_SERVICE_CLASSNAME, AccessCheckingAuditor.CLASS); + return conf; + } + + @Override + public void setup() throws Exception { + super.setup(); + auditor = (AccessCheckingAuditor) getFileSystem().getAuditor(); + } + + @Test + public void testFileAccessAllowed() throws Throwable { + describe("Enable checkaccess and verify it works with expected" + + " statistics"); + auditor.setAccessAllowed(true); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + touch(fs, path); + verifyMetrics( + () -> access(fs, path), + with(INVOCATION_ACCESS, 1), + whenRaw(FILE_STATUS_FILE_PROBE)); + } + + @Test + public void testDirAccessAllowed() throws Throwable { + describe("Enable checkaccess and verify it works with a dir"); + auditor.setAccessAllowed(true); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + mkdirs(path); + verifyMetrics( + () -> access(fs, path), + with(INVOCATION_ACCESS, 1), + whenRaw(FILE_STATUS_ALL_PROBES)); + } + + @Test + public void testRootDirAccessAllowed() throws Throwable { + describe("Enable checkaccess and verify root dir access"); + auditor.setAccessAllowed(true); + Path path = new Path("/"); + S3AFileSystem fs = getFileSystem(); + mkdirs(path); + verifyMetrics( + () -> access(fs, path), + with(INVOCATION_ACCESS, 1), + whenRaw(ROOT_FILE_STATUS_PROBE)); + } + + /** + * If the test auditor blocks access() calls, then + * the audit will fall after checking to see if the file + * exists. 
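These access tests exercise FileSystem.access(); the auditor's checkAccess() verdict is what turns a call on an existing path into an AccessControlException. From the caller's point of view the behaviour under test is roughly the following, with fs, path and auditor as placeholders for the fixtures above:

// permitted: returns normally
auditor.setAccessAllowed(true);
fs.access(path, FsAction.READ);

// denied by the auditor: surfaces as an AccessControlException
auditor.setAccessAllowed(false);
intercept(AccessControlException.class,
    () -> fs.access(path, FsAction.READ));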
+ */ + @Test + public void testFileAccessDenied() throws Throwable { + describe("Disable checkaccess and verify it fails"); + auditor.setAccessAllowed(false); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + touch(fs, path); + verifyMetricsIntercepting( + AccessControlException.class, + "\"" + path + "\"", + () -> access(fs, path), + with(INVOCATION_ACCESS, 1), + with(AUDIT_ACCESS_CHECK_FAILURE, 1), + // one S3 request: a HEAD. + with(AUDIT_REQUEST_EXECUTION, 1), + whenRaw(FILE_STATUS_FILE_PROBE)); + } + + /** + * If the test auditor blocks access() calls, then + * the audit will fall after checking to see if the directory + * exists. + */ + @Test + public void testDirAccessDenied() throws Throwable { + describe("Disable checkaccess and verify it dir access denied"); + auditor.setAccessAllowed(false); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + mkdirs(path); + verifyMetricsIntercepting( + AccessControlException.class, + "\"" + path + "\"", + () -> access(fs, path), + with(INVOCATION_ACCESS, 1), + // two S3 requests: a HEAD and a LIST. + with(AUDIT_REQUEST_EXECUTION, 2), + with(STORE_IO_REQUEST, 2), + with(AUDIT_ACCESS_CHECK_FAILURE, 1), + whenRaw(FILE_STATUS_ALL_PROBES)); + } + + /** + * Missing path will fail with FNFE irrespective of + * the access permission. + */ + @Test + public void testMissingPathAccessFNFE() throws Throwable { + describe("access() on missing path goes to S3 and fails with FNFE"); + auditor.setAccessAllowed(false); + Path path = methodPath(); + S3AFileSystem fs = getFileSystem(); + verifyMetricsIntercepting( + FileNotFoundException.class, + path.toString(), + () -> access(fs, path), + with(INVOCATION_ACCESS, 1), + // two S3 requests: a HEAD and a LIST. + with(AUDIT_REQUEST_EXECUTION, 2), + with(AUDIT_ACCESS_CHECK_FAILURE, 0), + whenRaw(FILE_STATUS_ALL_PROBES)); + } + + /** + * Call {@link S3AFileSystem#access(Path, FsAction)}. + * @param fs filesystem + * @param path path to check access + * @return the IOStatistics + * @throws AccessControlException access denied + * @throws IOException failure, including permission failure. + */ + private String access(final S3AFileSystem fs, final Path path) + throws AccessControlException, IOException { + fs.access(path, FsAction.ALL); + return ioStatisticsToPrettyString(fs.getIOStatistics()); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java new file mode 100644 index 0000000000000..287fe51b5ea29 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.nio.file.AccessDeniedException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.WriteOperationHelper; +import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest; +import org.apache.hadoop.fs.statistics.IOStatistics; + +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_FAILURE; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_REQUEST_EXECUTION; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.enableLoggingAuditor; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.resetAuditOptions; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_REQUEST_HANDLERS; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.UNAUDITED_OPERATION; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupCounterStatistic; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Test audit manager invocation by making assertions + * about the statistics of audit request execution + * {@link org.apache.hadoop.fs.s3a.Statistic#AUDIT_REQUEST_EXECUTION} + * and + * {@link org.apache.hadoop.fs.s3a.Statistic#AUDIT_FAILURE}. + */ +public class ITestAuditManager extends AbstractS3ACostTest { + + public ITestAuditManager() { + super(true); + } + + @Override + public Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + resetAuditOptions(conf); + enableLoggingAuditor(conf); + conf.set(AUDIT_REQUEST_HANDLERS, + SimpleAWSRequestHandler.CLASS); + return conf; + } + + /** + * Get the FS IOStatistics. + * @return the FS live IOSTats. + */ + private IOStatistics iostats() { + return getFileSystem().getIOStatistics(); + } + + /** + * Verify that operations outside a span are rejected + * by ensuring that the thread is outside a span, create + * a write operation helper, then + * reject it. + */ + @Test + public void testInvokeOutOfSpanRejected() throws Throwable { + describe("Operations against S3 will be rejected outside of a span"); + final S3AFileSystem fs = getFileSystem(); + final long failures0 = lookupCounterStatistic(iostats(), + AUDIT_FAILURE.getSymbol()); + final long exec0 = lookupCounterStatistic(iostats(), + AUDIT_REQUEST_EXECUTION.getSymbol()); + // API call + // create and close a span, so the FS is not in a span. + fs.createSpan("span", null, null).close(); + + // this will be out of span + final WriteOperationHelper writer + = fs.getWriteOperationHelper(); + + // which can be verified + Assertions.assertThat(writer.getAuditSpan()) + .matches(s -> !s.isValidSpan(), "Span is not valid"); + + // an S3 API call will fail and be mapped to access denial. 
+ final AccessDeniedException ex = intercept( + AccessDeniedException.class, UNAUDITED_OPERATION, () -> + writer.listMultipartUploads("/")); + + // verify the type of the inner cause, throwing the outer ex + // if it is null or a different class + if (!(ex.getCause() instanceof AuditFailureException)) { + throw ex; + } + + assertThatStatisticCounter(iostats(), AUDIT_REQUEST_EXECUTION.getSymbol()) + .isGreaterThan(exec0); + assertThatStatisticCounter(iostats(), AUDIT_FAILURE.getSymbol()) + .isGreaterThan(failures0); + } + + @Test + public void testRequestHandlerBinding() throws Throwable { + describe("Verify that extra request handlers can be added and that they" + + " will be invoked during request execution"); + final long baseCount = SimpleAWSRequestHandler.getInvocationCount(); + final S3AFileSystem fs = getFileSystem(); + final long exec0 = lookupCounterStatistic(iostats(), + AUDIT_REQUEST_EXECUTION.getSymbol()); + // API call + fs.getBucketLocation(); + // which MUST have ended up calling the extension request handler + Assertions.assertThat(SimpleAWSRequestHandler.getInvocationCount()) + .describedAs("Invocation count of plugged in request handler") + .isGreaterThan(baseCount); + assertThatStatisticCounter(iostats(), AUDIT_REQUEST_EXECUTION.getSymbol()) + .isGreaterThan(exec0); + assertThatStatisticCounter(iostats(), AUDIT_FAILURE.getSymbol()) + .isZero(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/SimpleAWSRequestHandler.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/SimpleAWSRequestHandler.java new file mode 100644 index 0000000000000..6f5a0445a92f7 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/SimpleAWSRequestHandler.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.util.concurrent.atomic.AtomicLong; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.handlers.RequestHandler2; + +/** + * Simple AWS handler to verify dynamic loading of extra request + * handlers during auditing setup. + * The invocation counter tracks the count of calls to + * {@link #beforeExecution(AmazonWebServiceRequest)}. + */ +public final class SimpleAWSRequestHandler extends RequestHandler2 { + + public static final String CLASS + = "org.apache.hadoop.fs.s3a.audit.SimpleAWSRequestHandler"; + + /** Count of invocations. */ + private static final AtomicLong INVOCATIONS = new AtomicLong(0); + + @Override + public AmazonWebServiceRequest beforeExecution( + final AmazonWebServiceRequest request) { + INVOCATIONS.incrementAndGet(); + return request; + } + + /** + * Get the count of invocations. + * @return a natural number. 
+ */ + public static long getInvocationCount() { + return INVOCATIONS.get(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditIntegration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditIntegration.java new file mode 100644 index 0000000000000..bd552b91aadcb --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditIntegration.java @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.io.IOException; +import java.nio.file.AccessDeniedException; +import java.util.List; + +import com.amazonaws.DefaultRequest; +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.impl.NoopAuditor; +import org.apache.hadoop.fs.s3a.impl.RequestFactoryImpl; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.s3a.S3AUtils.translateException; +import static org.apache.hadoop.fs.s3a.audit.AuditIntegration.attachSpanToRequest; +import static org.apache.hadoop.fs.s3a.audit.AuditIntegration.retrieveAttachedSpan; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.createIOStatisticsStoreForAuditing; +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.noopAuditConfig; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_REQUEST_HANDLERS; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_SERVICE_CLASSNAME; +import static org.apache.hadoop.service.ServiceAssert.assertServiceStateStarted; +import static org.apache.hadoop.service.ServiceAssert.assertServiceStateStopped; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Unit tests for auditing. + */ +public class TestAuditIntegration extends AbstractHadoopTestBase { + + private final IOStatisticsStore ioStatistics = + createIOStatisticsStoreForAuditing(); + + /** + * AuditFailureException is mapped to AccessDeniedException. + */ + @Test + public void testExceptionTranslation() throws Throwable { + intercept(AccessDeniedException.class, + () -> { + throw translateException("test", "/", + new AuditFailureException("should be translated")); + }); + } + + /** + * Create a no-op auditor. 
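SimpleAWSRequestHandler above is about the smallest possible RequestHandler2 extension. Purely as an illustration (this class is not part of the patch), another handler written against the same extension point might log each outgoing request type:

import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.handlers.RequestHandler2;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative handler: logs the class of every outgoing request. */
public final class RequestTypeLoggingHandler extends RequestHandler2 {

  private static final Logger LOG =
      LoggerFactory.getLogger(RequestTypeLoggingHandler.class);

  @Override
  public AmazonWebServiceRequest beforeExecution(
      final AmazonWebServiceRequest request) {
    // invoked for every request the audit manager passes down the chain
    LOG.debug("Issuing {}", request.getClass().getSimpleName());
    return request;
  }
}

Such a handler would be registered by listing its class name under AUDIT_REQUEST_HANDLERS, exactly as the tests here do with SimpleAWSRequestHandler.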
+ */ + @Test + public void testNoOpAuditorInstantiation() throws Throwable { + OperationAuditor auditor = createAndStartNoopAuditor( + ioStatistics); + assertThat(auditor) + .describedAs("No-op auditor") + .isInstanceOf(NoopAuditor.class) + .satisfies(o -> o.isInState(Service.STATE.STARTED)); + } + + /** + * Create a no-op auditor through AuditIntegration, just as + * the audit manager does. + * @param store stats store. + * @return a started auditor + */ + private NoopAuditor createAndStartNoopAuditor( + final IOStatisticsStore store) + throws IOException { + Configuration conf = noopAuditConfig(); + OperationAuditorOptions options = + OperationAuditorOptions.builder() + .withConfiguration(conf) + .withIoStatisticsStore(store); + OperationAuditor auditor = + AuditIntegration.createAndInitAuditor(conf, + AUDIT_SERVICE_CLASSNAME, + options); + assertThat(auditor) + .describedAs("No-op auditor") + .isInstanceOf(NoopAuditor.class) + .satisfies(o -> o.isInState(Service.STATE.INITED)); + auditor.start(); + return (NoopAuditor) auditor; + } + + /** + * The auditor class has to exist. + */ + @Test + public void testCreateNonexistentAuditor() throws Throwable { + final Configuration conf = new Configuration(); + OperationAuditorOptions options = + OperationAuditorOptions.builder() + .withConfiguration(conf) + .withIoStatisticsStore(ioStatistics); + conf.set(AUDIT_SERVICE_CLASSNAME, "not.a.known.class"); + intercept(RuntimeException.class, () -> + AuditIntegration.createAndInitAuditor(conf, + AUDIT_SERVICE_CLASSNAME, + options)); + } + + /** + * The audit manager creates the auditor the config tells it to; + * this will have the same lifecycle as the manager. + */ + @Test + public void testAuditManagerLifecycle() throws Throwable { + AuditManagerS3A manager = AuditIntegration.createAndStartAuditManager( + noopAuditConfig(), + ioStatistics); + OperationAuditor auditor = manager.getAuditor(); + assertServiceStateStarted(auditor); + manager.close(); + assertServiceStateStopped(auditor); + } + + @Test + public void testSingleRequestHandler() throws Throwable { + AuditManagerS3A manager = AuditIntegration.createAndStartAuditManager( + noopAuditConfig(), + ioStatistics); + List handlers + = manager.createRequestHandlers(); + assertThat(handlers) + .hasSize(1); + RequestHandler2 handler = handlers.get(0); + RequestFactory requestFactory = RequestFactoryImpl.builder() + .withBucket("bucket") + .build(); + // test the basic pre-request sequence while avoiding + // the complexity of recreating the full sequence + // (and probably getting it wrong) + GetObjectMetadataRequest r + = requestFactory.newGetObjectMetadataRequest("/"); + DefaultRequest dr = new DefaultRequest(r, "S3"); + assertThat(handler.beforeMarshalling(r)) + .isNotNull(); + assertThat(handler.beforeExecution(r)) + .isNotNull(); + handler.beforeRequest(dr); + + } + + /** + * Register a second handler, verify it makes it to the list. 
+ */ + @Test + public void testRequestHandlerLoading() throws Throwable { + Configuration conf = noopAuditConfig(); + conf.setClassLoader(this.getClass().getClassLoader()); + conf.set(AUDIT_REQUEST_HANDLERS, + SimpleAWSRequestHandler.CLASS); + AuditManagerS3A manager = AuditIntegration.createAndStartAuditManager( + conf, + ioStatistics); + assertThat(manager.createRequestHandlers()) + .hasSize(2) + .hasAtLeastOneElementOfType(SimpleAWSRequestHandler.class); + } + + @Test + public void testLoggingAuditorBinding() throws Throwable { + AuditManagerS3A manager = AuditIntegration.createAndStartAuditManager( + AuditTestSupport.loggingAuditConfig(), + ioStatistics); + OperationAuditor auditor = manager.getAuditor(); + assertServiceStateStarted(auditor); + manager.close(); + assertServiceStateStopped(auditor); + } + + @Test + public void testNoopAuditManager() throws Throwable { + AuditManagerS3A manager = AuditIntegration.stubAuditManager(); + assertThat(manager.createStateChangeListener()) + .describedAs("transfer state change listener") + .isNotNull(); + } + + @Test + public void testSpanAttachAndRetrieve() throws Throwable { + AuditManagerS3A manager = AuditIntegration.stubAuditManager(); + + AuditSpanS3A span = manager.createSpan("op", null, null); + GetObjectMetadataRequest request = + new GetObjectMetadataRequest("bucket", "key"); + attachSpanToRequest(request, span); + AWSAuditEventCallbacks callbacks = retrieveAttachedSpan(request); + assertThat(callbacks).isSameAs(span); + + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java new file mode 100644 index 0000000000000..608667d9dfed8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.util.List; + +import com.amazonaws.handlers.RequestHandler2; +import org.junit.Before; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.store.audit.AuditSpan; + +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.noopAuditConfig; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Unit tests related to span lifecycle. 
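The lifecycle tests below pin down the activation contract of spans created through the audit manager. Stated as a fragment, with manager standing for the started audit manager from the base class:

AuditSpan span1 = manager.createSpan("op1", null, null);
AuditSpan span2 = manager.createSpan("op2", null, null);  // creation activates it
span1.activate();   // explicit switch back to span1
span2.activate();   // ...and forward to span2 again
span2.close();      // deactivates; the manager's unbonded span becomes active
span1.close();      // closing a span that is not active leaves activation alone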
+ */ +public class TestAuditSpanLifecycle extends AbstractAuditingTest { + + private AuditSpan resetSpan; + + @Before + public void setup() throws Exception { + super.setup(); + resetSpan = getManager().getActiveAuditSpan(); + } + + protected Configuration createConfig() { + return noopAuditConfig(); + } + + /** + * Core lifecycle (remember: the service has already been started). + */ + @Test + public void testStop() throws Throwable { + getManager().stop(); + } + + @Test + public void testCreateRequestHandlers() throws Throwable { + List handlers + = getManager().createRequestHandlers(); + assertThat(handlers).isNotEmpty(); + } + + @Test + public void testInitialSpanIsInvalid() throws Throwable { + assertThat(resetSpan) + .matches(f -> !f.isValidSpan(), "is invalid"); + } + + @Test + public void testCreateCloseSpan() throws Throwable { + AuditSpan span = getManager().createSpan("op", null, null); + assertThat(span) + .matches(AuditSpan::isValidSpan, "is valid"); + assertActiveSpan(span); + // activation when already active is no-op + span.activate(); + assertActiveSpan(span); + // close the span + span.close(); + // the original span is restored. + assertActiveSpan(resetSpan); + } + + @Test + public void testSpanActivation() throws Throwable { + // real activation switches spans in the current thead. + + AuditSpan span1 = getManager().createSpan("op1", null, null); + AuditSpan span2 = getManager().createSpan("op2", null, null); + assertActiveSpan(span2); + // switch back to span 1 + span1.activate(); + assertActiveSpan(span1); + // then to span 2 + span2.activate(); + assertActiveSpan(span2); + span2.close(); + + assertActiveSpan(resetSpan); + span1.close(); + assertActiveSpan(resetSpan); + } + + @Test + public void testSpanDeactivation() throws Throwable { + AuditSpan span1 = getManager().createSpan("op1", null, null); + AuditSpan span2 = getManager().createSpan("op2", null, null); + assertActiveSpan(span2); + + // this doesn't close as it is not active + span1.close(); + assertActiveSpan(span2); + span2.close(); + assertActiveSpan(resetSpan); + } + + @Test + public void testResetSpanCannotBeClosed() throws Throwable { + + assertThat(resetSpan) + .matches(f -> !f.isValidSpan(), "is invalid"); + // create a new span + AuditSpan span1 = getManager().createSpan("op1", null, null); + // switch to the reset span and then close it. + resetSpan.activate(); + resetSpan.close(); + assertActiveSpan(resetSpan); + span1.close(); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java new file mode 100644 index 0000000000000..b653d24d4168a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java @@ -0,0 +1,323 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import java.net.URISyntaxException; +import java.util.Map; +import java.util.regex.Matcher; + +import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.fs.audit.CommonAuditContext; +import org.apache.hadoop.fs.store.audit.HttpReferrerAuditHeader; +import org.apache.hadoop.security.UserGroupInformation; + +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.loggingAuditConfig; +import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.REFERRER_HEADER_FILTER; +import static org.apache.hadoop.fs.s3a.audit.S3LogParser.*; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.HEADER_REFERRER; +import static org.apache.hadoop.fs.store.audit.HttpReferrerAuditHeader.maybeStripWrappedQuotes; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_FILESYSTEM_ID; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_ID; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_OP; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PATH; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PATH2; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_PRINCIPAL; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_THREAD0; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_THREAD1; +import static org.apache.hadoop.fs.audit.AuditConstants.PARAM_TIMESTAMP; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests for referrer audit header generation/parsing. + */ +public class TestHttpReferrerAuditHeader extends AbstractAuditingTest { + + /** + * Logging. + */ + private static final Logger LOG = + LoggerFactory.getLogger(TestHttpReferrerAuditHeader.class); + + private LoggingAuditor auditor; + + @Before + public void setup() throws Exception { + super.setup(); + + auditor = (LoggingAuditor) getManager().getAuditor(); + } + + /** + * Create the config from {@link AuditTestSupport#loggingAuditConfig()} + * and patch in filtering for fields x1, x2, x3. + * @return a logging configuration. + */ + protected Configuration createConfig() { + final Configuration conf = loggingAuditConfig(); + conf.set(REFERRER_HEADER_FILTER, "x1, x2, x3"); + return conf; + } + + /** + * This verifies that passing a request through the audit manager + * causes the http referrer header to be added, that it can + * be split to query parameters, and that those parameters match + * those of the active wrapped span. 
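Background for the tests below: the span's audit data rides in the query string of an HTTP referrer URL, and the assertions unpack it with HttpReferrerAuditHeader.extractQueryParameters(). A simplified, self-contained sketch of that unpacking is shown here, assuming plain key=value pairs joined by '&'; ReferrerParseSketch and its sample URL are illustrative stand-ins, not the production code.

    import java.net.URI;
    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public final class ReferrerParseSketch {

      /** Split the query part of a referrer URL into an ordered key/value map. */
      static Map<String, String> parseQuery(String referrer) throws Exception {
        Map<String, String> params = new LinkedHashMap<>();
        String query = new URI(referrer).getRawQuery();
        if (query == null || query.isEmpty()) {
          return params;
        }
        for (String pair : query.split("&")) {
          int eq = pair.indexOf('=');
          String key = eq >= 0 ? pair.substring(0, eq) : pair;
          String value = eq >= 0 ? pair.substring(eq + 1) : "";
          params.put(
              URLDecoder.decode(key, StandardCharsets.UTF_8.name()),
              URLDecoder.decode(value, StandardCharsets.UTF_8.name()));
        }
        return params;
      }

      public static void main(String[] args) throws Exception {
        String header = "https://audit.example.org/hadoop/1/op_create/span-0001/"
            + "?op=op_create&p1=fork-0001/test/file&pr=alice&id=span-0001";
        parseQuery(header).forEach((k, v) -> System.out.println(k + " = " + v));
      }
    }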
+ */ + @Test + public void testHttpReferrerPatchesTheRequest() throws Throwable { + AuditSpan span = span(); + long ts = span.getTimestamp(); + GetObjectMetadataRequest request = head(); + Map headers + = request.getCustomRequestHeaders(); + assertThat(headers) + .describedAs("Custom headers") + .containsKey(HEADER_REFERRER); + String header = headers.get(HEADER_REFERRER); + LOG.info("Header is {}", header); + Map params + = HttpReferrerAuditHeader.extractQueryParameters(header); + assertMapContains(params, PARAM_PRINCIPAL, + UserGroupInformation.getCurrentUser().getUserName()); + assertMapContains(params, PARAM_FILESYSTEM_ID, auditor.getAuditorId()); + assertMapContains(params, PARAM_OP, OPERATION); + assertMapContains(params, PARAM_PATH, PATH_1); + assertMapContains(params, PARAM_PATH2, PATH_2); + String threadID = CommonAuditContext.currentThreadID(); + assertMapContains(params, PARAM_THREAD0, threadID); + assertMapContains(params, PARAM_THREAD1, threadID); + assertMapContains(params, PARAM_ID, span.getSpanId()); + assertThat(span.getTimestamp()) + .describedAs("Timestamp of " + span) + .isEqualTo(ts); + + assertMapContains(params, PARAM_TIMESTAMP, + Long.toString(ts)); + } + + /** + * Test that a header with complext paths including spaces + * and colons can be converted to a URI and back again + * without the path getting corrupted. + */ + @Test + public void testHeaderComplexPaths() throws Throwable { + String p1 = "s3a://dotted.bucket/path: value/subdir"; + String p2 = "s3a://key/"; + AuditSpan span = getManager().createSpan(OPERATION, p1, p2); + long ts = span.getTimestamp(); + Map params = issueRequestAndExtractParameters(); + assertMapContains(params, PARAM_PRINCIPAL, + UserGroupInformation.getCurrentUser().getUserName()); + assertMapContains(params, PARAM_FILESYSTEM_ID, auditor.getAuditorId()); + assertMapContains(params, PARAM_OP, OPERATION); + assertMapContains(params, PARAM_PATH, p1); + assertMapContains(params, PARAM_PATH2, p2); + String threadID = CommonAuditContext.currentThreadID(); + assertMapContains(params, PARAM_THREAD0, threadID); + assertMapContains(params, PARAM_THREAD1, threadID); + assertMapContains(params, PARAM_ID, span.getSpanId()); + assertThat(span.getTimestamp()) + .describedAs("Timestamp of " + span) + .isEqualTo(ts); + + assertMapContains(params, PARAM_TIMESTAMP, + Long.toString(ts)); + } + + /** + * Issue a request, then get the header field and parse it to the parameter. + * @return map of query params on the referrer header. + * @throws URISyntaxException failure to parse the header as a URI. + */ + private Map issueRequestAndExtractParameters() + throws URISyntaxException { + head(); + return HttpReferrerAuditHeader.extractQueryParameters( + auditor.getLastHeader()); + } + + + /** + * Test that headers are filtered out if configured. + */ + @Test + public void testHeaderFiltering() throws Throwable { + // add two attributes, x2 will be filtered. + AuditSpan span = getManager().createSpan(OPERATION, null, null); + auditor.addAttribute("x0", "x0"); + auditor.addAttribute("x2", "x2"); + final Map params + = issueRequestAndExtractParameters(); + assertThat(params) + .doesNotContainKey("x2"); + + } + + /** + * A real log entry. + * This is derived from a real log entry on a test run. + * If this needs to be updated, please do it from a real log. + * Splitting this up across lines has a tendency to break things, so + * be careful making changes. 
+ */ + public static final String SAMPLE_LOG_ENTRY = + "183c9826b45486e485693808f38e2c4071004bf5dfd4c3ab210f0a21a4000000" + + " bucket-london" + + " [13/May/2021:11:26:06 +0000]" + + " 109.157.171.174" + + " arn:aws:iam::152813717700:user/dev" + + " M7ZB7C4RTKXJKTM9" + + " REST.PUT.OBJECT" + + " fork-0001/test/testParseBrokenCSVFile" + + " \"PUT /fork-0001/test/testParseBrokenCSVFile HTTP/1.1\"" + + " 200" + + " -" + + " -" + + " 794" + + " 55" + + " 17" + + " \"https://audit.example.org/hadoop/1/op_create/" + + "e8ede3c7-8506-4a43-8268-fe8fcbb510a4-00000278/" + + "?op=op_create" + + "&p1=fork-0001/test/testParseBrokenCSVFile" + + "&pr=alice" + + "&ps=2eac5a04-2153-48db-896a-09bc9a2fd132" + + "&id=e8ede3c7-8506-4a43-8268-fe8fcbb510a4-00000278&t0=154" + + "&fs=e8ede3c7-8506-4a43-8268-fe8fcbb510a4&t1=156&" + + "ts=1620905165700\"" + + " \"Hadoop 3.4.0-SNAPSHOT, java/1.8.0_282 vendor/AdoptOpenJDK\"" + + " -" + + " TrIqtEYGWAwvu0h1N9WJKyoqM0TyHUaY+ZZBwP2yNf2qQp1Z/0=" + + " SigV4" + + " ECDHE-RSA-AES128-GCM-SHA256" + + " AuthHeader" + + " bucket-london.s3.eu-west-2.amazonaws.com" + + " TLSv1.2"; + + private static final String DESCRIPTION = String.format( + "log entry %s split by %s", SAMPLE_LOG_ENTRY, + LOG_ENTRY_PATTERN); + + /** + * Match the log entry and validate the results. + */ + @Test + public void testMatchAWSLogEntry() throws Throwable { + + LOG.info("Matcher pattern is\n'{}'", LOG_ENTRY_PATTERN); + LOG.info("Log entry is\n'{}'", SAMPLE_LOG_ENTRY); + final Matcher matcher = LOG_ENTRY_PATTERN.matcher(SAMPLE_LOG_ENTRY); + + // match the pattern against the entire log entry. + assertThat(matcher.matches()) + .describedAs("matches() " + DESCRIPTION) + .isTrue(); + final int groupCount = matcher.groupCount(); + assertThat(groupCount) + .describedAs("Group count of " + DESCRIPTION) + .isGreaterThanOrEqualTo(AWS_LOG_REGEXP_GROUPS.size()); + + // now go through the groups + + for (String name : AWS_LOG_REGEXP_GROUPS) { + try { + final String group = matcher.group(name); + LOG.info("[{}]: '{}'", name, group); + } catch (IllegalStateException e) { + // group failure + throw new AssertionError("No match for group <" + name + ">: " + + e, e); + } + } + // if you print out the groups as integers, there is duplicate matching + // for some fields. Why? + for (int i = 1; i <= groupCount; i++) { + try { + final String group = matcher.group(i); + LOG.info("[{}]: '{}'", i, group); + } catch (IllegalStateException e) { + // group failure + throw new AssertionError("No match for group " + i + +": "+ e, e); + } + } + + // verb + assertThat(nonBlankGroup(matcher, VERB_GROUP)) + .describedAs("HTTP Verb") + .isEqualTo(S3LogVerbs.PUT); + + // referrer + final String referrer = nonBlankGroup(matcher, REFERRER_GROUP); + Map params + = HttpReferrerAuditHeader.extractQueryParameters(referrer); + LOG.info("Parsed referrer"); + for (Map.Entry entry : params.entrySet()) { + LOG.info("{} = \"{}\"", entry.getKey(), entry.getValue()); + } + } + + /** + * Get a group entry which must be non-blank. + * @param matcher matcher + * @param group group name + * @return value + */ + private String nonBlankGroup(final Matcher matcher, + final String group) { + final String g = matcher.group(group); + assertThat(g) + .describedAs("Value of group %s", group) + .isNotBlank(); + return g; + } + + /** + * Verify the header quote stripping works. 
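The log-matching test below relies on named capture groups, the same technique the S3LogParser pattern uses for AWS server access logs. A cut-down, self-contained sketch of the idea follows; the pattern is deliberately simplified and is not the real LOG_ENTRY_PATTERN.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class LogGroupSketch {

      // simplified stand-in for the server log pattern: a quoted request
      // line, a status code, then a quoted referrer URL
      private static final Pattern SIMPLE_ENTRY = Pattern.compile(
          "\"(?<verb>[A-Z]+) (?<key>\\S+) HTTP/1\\.1\" (?<status>\\d{3}) .*\"(?<referrer>https?://[^\"]+)\"");

      public static void main(String[] args) {
        String entry = "\"PUT /fork-0001/test/testParseBrokenCSVFile HTTP/1.1\""
            + " 200 - - \"https://audit.example.org/hadoop/1/op_create/?op=op_create&pr=alice\"";
        Matcher m = SIMPLE_ENTRY.matcher(entry);
        if (m.find()) {
          // named groups keep the extraction readable, just as the test
          // iterates over AWS_LOG_REGEXP_GROUPS by name
          System.out.println("verb     = " + m.group("verb"));
          System.out.println("status   = " + m.group("status"));
          System.out.println("referrer = " + m.group("referrer"));
        }
      }
    }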
+ */ + @Test + public void testStripWrappedQuotes() throws Throwable { + expectStrippedField("", ""); + expectStrippedField("\"UA\"", "UA"); + expectStrippedField("\"\"\"\"", ""); + expectStrippedField("\"\"\"b\"", "b"); + } + + /** + * Expect a field with quote stripping to match the expected value. + * @param str string to strip + * @param ex expected value. + */ + private void expectStrippedField(final String str, + final String ex) { + assertThat(maybeStripWrappedQuotes(str)) + .describedAs("Stripped <%s>", str) + .isEqualTo(ex); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java new file mode 100644 index 0000000000000..8d37b432acb56 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.audit; + +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CopyPartRequest; +import com.amazonaws.services.s3.transfer.internal.TransferStateChangeListener; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.audit.impl.LoggingAuditor; +import org.apache.hadoop.fs.store.audit.AuditSpan; + +import static org.apache.hadoop.fs.s3a.audit.AuditTestSupport.loggingAuditConfig; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Logging auditor tests. + * By setting the auditor to raise an exception on unaudited spans, + * it is straightforward to determine if an operation was invoked + * outside a span: call it, and if it does not raise an exception, + * all is good. + */ +public class TestLoggingAuditor extends AbstractAuditingTest { + + /** + * Logging. + */ + private static final Logger LOG = + LoggerFactory.getLogger(TestLoggingAuditor.class); + + private LoggingAuditor auditor; + + @Before + public void setup() throws Exception { + super.setup(); + auditor = (LoggingAuditor) getManager().getAuditor(); + } + + /** + * Config has logging auditing and adds SimpleAWSRequestHandler + * too, for testing of that being added to the chain. 
+   * @return a config
+   */
+  protected Configuration createConfig() {
+    return loggingAuditConfig();
+  }
+
+  @Test
+  public void testToStringRobustness() throws Throwable {
+    // force in the toString calls so if there are NPE problems
+    // they will surface irrespective of log settings
+    LOG.info(getManager().toString());
+    LOG.info(auditor.toString());
+  }
+
+  /**
+   * Test span activity with a span being activated/deactivated
+   * and verification that calls to head() succeed in the span
+   * and fail outside of it.
+   */
+  @Test
+  public void testLoggingSpan() throws Throwable {
+    long executionCount = 0;
+    long failureCount = 0;
+
+    // create a span
+    AuditSpan span = span();
+
+    // which is active
+    assertActiveSpan(span);
+    // so requests are allowed
+    verifyAuditExecutionCount(0);
+    head();
+    verifyAuditExecutionCount(++executionCount);
+
+    // now leave the span
+    span.deactivate();
+
+    // head calls are no longer allowed.
+    verifyAuditFailureCount(failureCount);
+    assertHeadUnaudited();
+    verifyAuditFailureCount(++failureCount);
+    verifyAuditExecutionCount(++executionCount);
+
+    // spans can be reactivated and used.
+    span.activate();
+    head();
+    verifyAuditExecutionCount(++executionCount);
+
+    // it's a no-op if the span is already active.
+    span.activate();
+    assertActiveSpan(span);
+
+    // closing a span deactivates it.
+    span.close();
+
+    // IO is now outside any span, so it is unaudited and rejected.
+    assertHeadUnaudited();
+    verifyAuditFailureCount(++failureCount);
+    verifyAuditExecutionCount(++executionCount);
+
+    // and it is harmless to deactivate a span repeatedly.
+    span.deactivate();
+    span.deactivate();
+  }
+
+  /**
+   * Some request types are allowed to execute outside of
+   * a span.
+   * Required as the transfer manager runs them in its threads.
+   */
+  @Test
+  public void testCopyOutsideSpanAllowed() throws Throwable {
+    getManager().beforeExecution(new CopyPartRequest());
+    getManager().beforeExecution(new CompleteMultipartUploadRequest());
+  }
+
+  /**
+   * Outside a span, the transfer state change setup works but
+   * the call is unaudited.
+   */
+  @Test
+  public void testTransferStateListenerOutsideSpan() throws Throwable {
+    TransferStateChangeListener listener
+        = getManager().createStateChangeListener();
+    listener.transferStateChanged(null, null);
+    assertHeadUnaudited();
+  }
+
+  /**
+   * A state change listener created within a span switches the
+   * invoking thread back to that span, so subsequent calls are audited.
+   */
+  @Test
+  public void testTransferStateListenerInSpan() throws Throwable {
+
+    assertHeadUnaudited();
+    AuditSpan span = span();
+
+    // create the listener in the span
+    TransferStateChangeListener listener
+        = getManager().createStateChangeListener();
+    span.deactivate();
+
+    // head calls fail
+    assertHeadUnaudited();
+
+    // until the state change switches this thread back to the span
+    listener.transferStateChanged(null, null);
+
+    // which can be probed
+    assertActiveSpan(span);
+
+    // and executed within
+    head();
+  }
+
+  /**
+   * You cannot deactivate the unbonded span.
+   */
+  @Test
+  public void testUnbondedSpanWillNotDeactivate() throws Throwable {
+    AuditSpan span = activeSpan();
+    // the active span is unbonded
+    assertUnbondedSpan(span);
+    // deactivate it.
+    span.deactivate();
+    // it is still the active span.
+    assertActiveSpan(span);
+  }
+
+  /**
+   * Each span has a different ID.
+   * This is clearly not an exhaustive test.
+ */ + @Test + public void testSpanIdsAreDifferent() throws Throwable { + AuditSpan s1 = span(); + AuditSpan s2 = span(); + assertThat(s1.getSpanId()) + .doesNotMatch(s2.getSpanId()); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java index 771578001235e..814292c45d83b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java @@ -255,7 +255,8 @@ public void testAssumeRoleBadInnerAuth() throws Exception { conf.set(SECRET_KEY, "not secret"); expectFileSystemCreateFailure(conf, AWSBadRequestException.class, - "IncompleteSignature"); + "not a valid " + + "key=value pair (missing equal-sign) in Authorization header"); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index 1df4f9b9cc15c..faef79c1f6379 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.s3a.InconsistentAmazonS3Client; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.WriteOperationHelper; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordWriter; @@ -289,10 +290,13 @@ protected int abortMultipartUploadsUnderPath(Path path) throws IOException { S3AFileSystem fs = getFileSystem(); if (fs != null && path != null) { String key = fs.pathToKey(path); - WriteOperationHelper writeOps = fs.getWriteOperationHelper(); - int count = writeOps.abortMultipartUploadsUnderPath(key); - if (count > 0) { - log().info("Multipart uploads deleted: {}", count); + int count = 0; + try (AuditSpan span = span()) { + WriteOperationHelper writeOps = fs.getWriteOperationHelper(); + count = writeOps.abortMultipartUploadsUnderPath(key); + if (count > 0) { + log().info("Multipart uploads deleted: {}", count); + } } return count; } else { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java index 14207e8359788..1e5a9582b2c2d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.s3a.commit.files.SuccessData; import org.apache.hadoop.fs.s3a.commit.magic.MagicS3GuardCommitter; import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.MapFile; @@ -177,7 +178,7 @@ public void teardown() throws Exception { describe("teardown"); abortInTeardown.forEach(this::abortJobQuietly); if (outDir != null) { - try { + try (AuditSpan span = span()) { abortMultipartUploadsUnderPath(outDir); cleanupDestDir(); } catch (IOException e) { diff 
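TestHeaderProcessing, whose diff follows, checks how object-store response headers are surfaced as filesystem XAttrs. A rough, self-contained sketch of that mapping is given here; the "header." prefix and the class/helper names are assumptions made for illustration, not the constants defined in HeaderProcessing.

    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;

    public final class XAttrMappingSketch {

      // assumed prefix for illustration; the real constant lives in HeaderProcessing
      private static final String HEADER_PREFIX = "header.";

      /** Convert object-store response headers into an XAttr-style byte map. */
      static Map<String, byte[]> toXAttrs(Map<String, String> httpHeaders) {
        Map<String, byte[]> xattrs = new HashMap<>();
        httpHeaders.forEach((name, value) ->
            xattrs.put(HEADER_PREFIX + name,
                value.getBytes(StandardCharsets.UTF_8)));
        return xattrs;
      }

      public static void main(String[] args) {
        Map<String, String> headers = new HashMap<>();
        headers.put("Content-Type", "application/octet-stream");
        headers.put("Content-Length", "232");
        toXAttrs(headers).forEach((k, v) ->
            System.out.println(k + " = " + new String(v, StandardCharsets.UTF_8)));
      }
    }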
--git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index ab2f9a2d316ee..e8c5d888d10dc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -195,7 +195,8 @@ private void fullThrottle() { setThrottling(FULL_THROTTLE, STANDARD_FAILURE_LIMIT); } - private CommitOperations newCommitOperations() { + private CommitOperations newCommitOperations() + throws IOException { return new CommitOperations(getFileSystem()); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java index aa3cecaf1eb7a..415dcba0f575f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java @@ -38,6 +38,7 @@ import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_XATTR_GET_NAMED; import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.CONTENT_TYPE_OCTET_STREAM; import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.CONTENT_TYPE_APPLICATION_XML; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.CONTENT_TYPE_X_DIRECTORY; import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_CONTENT_LENGTH; import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_CONTENT_TYPE; import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STANDARD_HEADERS; @@ -163,7 +164,7 @@ public void testXAttrDir() throws Throwable { assertHeader(XA_CONTENT_LENGTH, bytes) .isEqualTo("0"); assertHeaderEntry(xAttrs, XA_CONTENT_TYPE) - .isEqualTo(CONTENT_TYPE_OCTET_STREAM); + .isEqualTo(CONTENT_TYPE_X_DIRECTORY); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java index e0c6feeb256cc..15c7ae917dfc5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java @@ -33,8 +33,12 @@ import org.junit.Test; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.MockS3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.AuditTestSupport; import org.apache.hadoop.fs.s3a.test.OperationTrackingStore; +import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.test.HadoopTestBase; import static java.lang.System.currentTimeMillis; @@ -50,8 +54,8 @@ /** * Unit tests of header processing logic in {@link HeaderProcessing}. * Builds up a context accessor where the path - * defined in {@link #MAGIC_PATH} exists and returns object metadata. - * + * defined in {@link #MAGIC_PATH} exists and returns object metadata + * through the HeaderProcessingCallbacks. 
*/ public class TestHeaderProcessing extends HadoopTestBase { @@ -95,7 +99,7 @@ public void setup() throws Exception { Long.toString(MAGIC_LEN)); context = S3ATestUtils.createMockStoreContext(true, new OperationTrackingStore(), CONTEXT_ACCESSORS); - headerProcessing = new HeaderProcessing(context); + headerProcessing = new HeaderProcessing(context, CONTEXT_ACCESSORS); } @Test @@ -203,7 +207,7 @@ public void testMetadataCopySkipsMagicAttribute() throws Throwable { final String owner = "x-header-owner"; final String root = "root"; CONTEXT_ACCESSORS.userHeaders.put(owner, root); - final ObjectMetadata source = context.getContextAccessors() + final ObjectMetadata source = CONTEXT_ACCESSORS .getObjectMetadata(MAGIC_KEY); final Map sourceUserMD = source.getUserMetadata(); Assertions.assertThat(sourceUserMD.get(owner)) @@ -254,9 +258,11 @@ protected void assertAttributeHasValue(final String key, /** * Context accessor with XAttrs returned for the {@link #MAGIC_PATH} * path. + * It also implements the Header Processing Callbacks, + * so those calls are mapped to the same data. */ private static final class XAttrContextAccessor - implements ContextAccessors { + implements ContextAccessors, HeaderProcessing.HeaderProcessingCallbacks { private final Map userHeaders = new HashMap<>(); @@ -291,6 +297,16 @@ public Path makeQualified(final Path path) { return path; } + @Override + public AuditSpan getActiveAuditSpan() { + return AuditTestSupport.NOOP_SPAN; + } + + @Override + public RequestFactory getRequestFactory() { + return MockS3AFileSystem.REQUEST_FACTORY; + } + @Override public ObjectMetadata getObjectMetadata(final String key) throws IOException { @@ -303,6 +319,7 @@ public ObjectMetadata getObjectMetadata(final String key) } else { throw new FileNotFoundException(key); } + } public void setHeader(String key, String val) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java index 42714cb1555e4..0d9ba1d304490 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java @@ -29,7 +29,6 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import com.amazonaws.services.s3.model.ObjectMetadata; import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; @@ -37,8 +36,12 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.MockS3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.AuditTestSupport; import org.apache.hadoop.fs.s3a.test.OperationTrackingStore; +import org.apache.hadoop.fs.store.audit.AuditSpan; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.ACCESS_DENIED; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.removeUndeletedPaths; @@ -257,10 +260,15 @@ public Path makeQualified(final Path path) { } @Override - public ObjectMetadata getObjectMetadata(final String key) - throws IOException { - return new ObjectMetadata(); + public AuditSpan getActiveAuditSpan() { + return 
AuditTestSupport.NOOP_SPAN; } + + @Override + public RequestFactory getRequestFactory() { + return MockS3AFileSystem.REQUEST_FACTORY; + } + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java new file mode 100644 index 0000000000000..c28eb5a9628d3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicLong; + +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; +import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.audit.AWSRequestAnalyzer; +import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Test that the request factory creates requests; factory + * is is built with different options on different test cases. + * Everything goes through {@link AWSRequestAnalyzer} to + * verify it handles every example, and logged so that a manual + * review of the output can show it is valid. + */ +public class TestRequestFactory extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestRequestFactory.class); + + private final AWSRequestAnalyzer analyzer = new AWSRequestAnalyzer(); + + /** + * Count of requests analyzed via the {@link #a(AmazonWebServiceRequest)} + * call. + */ + private int requestsAnalyzed; + + /** + * No preparer; encryption is set. + */ + @Test + public void testRequestFactoryWithEncryption() throws Throwable { + RequestFactory factory = RequestFactoryImpl.builder() + .withBucket("bucket") + .withEncryptionSecrets( + new EncryptionSecrets(S3AEncryptionMethods.SSE_KMS, + "kms:key")) + .build(); + createFactoryObjects(factory); + } + + /** + * Now add a processor and verify that it was invoked for + * exactly as many requests as were analyzed. 
+ */ + @Test + public void testRequestFactoryWithProcessor() throws Throwable { + CountRequests countRequests = new CountRequests(); + RequestFactory factory = RequestFactoryImpl.builder() + .withBucket("bucket") + .withRequestPreparer(countRequests) + .build(); + + createFactoryObjects(factory); + assertThat(countRequests.counter.get()) + .describedAs("request preparation count") + .isEqualTo(requestsAnalyzed); + } + + private final class CountRequests + implements RequestFactoryImpl.PrepareRequest { + + private final AtomicLong counter = new AtomicLong(); + + @Override + public T prepareRequest(final T t) { + counter.addAndGet(1); + return t; + } + } + + /** + * Analyze the request, log the output, return the info. + * @param request request. + * @param type of request. + * @return value + */ + private AWSRequestAnalyzer.RequestInfo + a(T request) { + AWSRequestAnalyzer.RequestInfo info = analyzer.analyze(request); + LOG.info("{}", info); + requestsAnalyzed++; + return info; + } + + /** + * Create objects through the factory. + * @param factory factory + */ + private void createFactoryObjects(RequestFactory factory) { + String path = "path"; + String path2 = "path2"; + String id = "1"; + ObjectMetadata md = factory.newObjectMetadata(128); + a(factory.newAbortMultipartUploadRequest(path, id)); + a(factory.newCompleteMultipartUploadRequest(path, id, + new ArrayList<>())); + a(factory.newCopyObjectRequest(path, path2, md)); + a(factory.newDeleteObjectRequest(path)); + a(factory.newBulkDeleteRequest(new ArrayList<>(), true)); + a(factory.newDirectoryMarkerRequest(path)); + a(factory.newGetObjectRequest(path)); + a(factory.newGetObjectMetadataRequest(path)); + a(factory.newListMultipartUploadsRequest(path)); + a(factory.newListObjectsV1Request(path, "/", 1)); + a(factory.newListNextBatchOfObjectsRequest(new ObjectListing())); + a(factory.newListObjectsV2Request(path, "/", 1)); + a(factory.newMultipartUploadRequest(path)); + File srcfile = new File("/tmp/a"); + a(factory.newPutObjectRequest(path, + factory.newObjectMetadata(-1), srcfile)); + ByteArrayInputStream stream = new ByteArrayInputStream(new byte[0]); + a(factory.newPutObjectRequest(path, md, stream)); + a(factory.newSelectRequest(path)); + } + + /** + * Multiparts are special so test on their own. 
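The CountRequests preparer above mirrors the RequestFactoryImpl.PrepareRequest hook: every request built by the factory is routed through a callback before use. A standalone sketch of the same callback pattern, with no AWS SDK types, is shown here; all names in it are illustrative.

    import java.util.concurrent.atomic.AtomicLong;

    public final class PreparerSketch {

      /** Minimal stand-in for a request-preparation hook. */
      @FunctionalInterface
      interface PrepareRequest<R> {
        R prepare(R request);
      }

      /** Counts every request passed through, then hands it back unchanged. */
      static final class CountingPreparer<R> implements PrepareRequest<R> {
        private final AtomicLong counter = new AtomicLong();

        @Override
        public R prepare(R request) {
          counter.incrementAndGet();
          return request;
        }

        long count() {
          return counter.get();
        }
      }

      public static void main(String[] args) {
        CountingPreparer<String> preparer = new CountingPreparer<>();
        // every "request" built by a factory would pass through prepare()
        preparer.prepare("GET /bucket/key");
        preparer.prepare("PUT /bucket/key");
        System.out.println("requests prepared: " + preparer.count());
      }
    }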
+ */ + @Test + public void testMultipartUploadRequest() throws Throwable { + CountRequests countRequests = new CountRequests(); + + RequestFactory factory = RequestFactoryImpl.builder() + .withBucket("bucket") + .withRequestPreparer(countRequests) + .build(); + + String path = "path"; + String path2 = "path2"; + String id = "1"; + File srcfile = File.createTempFile("file", ""); + try { + ByteArrayInputStream stream = new ByteArrayInputStream(new byte[0]); + + a(factory.newUploadPartRequest(path, id, 1, 0, stream, null, 0)); + a(factory.newUploadPartRequest(path, id, 2, 128_000_000, + null, srcfile, 0)); + // offset is past the EOF + intercept(IllegalArgumentException.class, () -> + factory.newUploadPartRequest(path, id, 3, 128_000_000, + null, srcfile, 128)); + } finally { + srcfile.delete(); + } + assertThat(countRequests.counter.get()) + .describedAs("request preparation count") + .isEqualTo(requestsAnalyzed); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java index c4f8db71937d0..99a64e6a94268 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; +import org.apache.hadoop.fs.store.audit.AuditSpan; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; @@ -108,6 +109,18 @@ public AbstractS3ACostTest( this.authoritative = authoritative; } + /** + * Constructor for tests which don't include + * any for S3Guard. + * @param keepMarkers should markers be tested. + */ + public AbstractS3ACostTest( + final boolean keepMarkers) { + this.s3guard = false; + this.keepMarkers = keepMarkers; + this.authoritative = false; + } + @Override public Configuration createConfiguration() { Configuration conf = super.createConfiguration(); @@ -183,6 +196,8 @@ public void setup() throws Exception { deleteMarkerStatistic = isBulkDelete() ? OBJECT_BULK_DELETE_REQUEST : OBJECT_DELETE_REQUEST; + + setSpanSource(fs); } public void assumeUnguarded() { @@ -357,6 +372,7 @@ private void resetStatistics() { protected T verifyMetrics( Callable eval, OperationCostValidator.ExpectedProbe... expected) throws Exception { + span(); return costValidator.exec(eval, expected); } @@ -379,6 +395,7 @@ protected E verifyMetricsIntercepting( String text, Callable eval, OperationCostValidator.ExpectedProbe... expected) throws Exception { + span(); return costValidator.intercepting(clazz, text, eval, expected); } @@ -476,6 +493,8 @@ protected OperationCostValidator.ExpectedProbe whenDeleting( /** * Execute a closure expecting a specific number of HEAD/LIST calls * on raw S3 stores only. The operation is always evaluated. + * A span is always created prior to the invocation; saves trouble + * in tests that way. 
* @param cost expected cost * @param eval closure to evaluate * @param return type of closure @@ -525,12 +544,14 @@ public void interceptRawGetFileStatusFNFE( boolean needEmptyDirectoryFlag, Set probes, OperationCost cost) throws Exception { - interceptRaw(FileNotFoundException.class, "", - cost, () -> - innerGetFileStatus(getFileSystem(), - path, - needEmptyDirectoryFlag, - probes)); + try (AuditSpan span = span()) { + interceptRaw(FileNotFoundException.class, "", + cost, () -> + innerGetFileStatus(getFileSystem(), + path, + needEmptyDirectoryFlag, + probes)); + } } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java index ed56802ddfec1..a08d77367b18f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AUtils; +import org.apache.hadoop.fs.store.audit.AuditSpan; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH; @@ -58,6 +59,7 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.util.functional.RemoteIterators.foreach; /** * This is a test suite designed to verify that directory markers do @@ -223,7 +225,6 @@ public void setup() throws Exception { assume("unguarded FS only", !fs.hasMetadataStore()); s3client = fs.getAmazonS3ClientForTesting("markers"); - bucket = fs.getBucket(); Path base = new Path(methodPath(), "base"); @@ -653,7 +654,9 @@ private void head404(final String key) throws Exception { } /** - * Execute an operation; transate AWS exceptions. + * Execute an operation; translate AWS exceptions. + * Wraps the operation in an audit span, so that low-level + * calls can be safely made. * @param op operation * @param call call to make * @param returned type @@ -662,7 +665,7 @@ private void head404(final String key) throws Exception { */ private T exec(String op, Callable call) throws Exception { ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - try { + try (AuditSpan span = getSpanSource().createSpan(op, null, null)) { return call.call(); } catch (AmazonClientException ex) { throw S3AUtils.translateException(op, "", ex); @@ -749,9 +752,7 @@ private List toList( RemoteIterator status) throws IOException { List l = new ArrayList<>(); - while (status.hasNext()) { - l.add(status.next()); - } + foreach(status, st -> l.add(st)); return dump(l); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java new file mode 100644 index 0000000000000..6449d2a5e44c9 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.performance; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileSystem; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Statistic.AUDIT_SPAN_CREATION; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_GET_CONTENT_SUMMARY; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUEST; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_METADATA_REQUESTS; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILESTATUS_DIR_PROBE_L; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILE_STATUS_FILE_PROBE; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.LIST_OPERATION; + +/** + * Use metrics to assert about the cost of misc operations. + * Parameterized on directory marker keep vs delete + */ +@RunWith(Parameterized.class) +public class ITestS3AMiscOperationCost extends AbstractS3ACostTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestS3AMiscOperationCost.class); + + /** + * Parameterization. + */ + @Parameterized.Parameters(name = "{0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {"keep-markers", true}, + {"delete-markers", false} + }); + } + + public ITestS3AMiscOperationCost(final String name, + final boolean keepMarkers) { + super(false, keepMarkers, false); + } + + /** + * Common operation which should be low cost as possible. + */ + @Test + public void testMkdirOverDir() throws Throwable { + describe("create a dir over a dir"); + S3AFileSystem fs = getFileSystem(); + // create base dir with marker + Path baseDir = dir(methodPath()); + + // create the child; only assert on HEAD/GET IO + verifyMetrics(() -> fs.mkdirs(baseDir), + with(AUDIT_SPAN_CREATION, 1), + // full probe on dest plus list only on parent. 
+ with(OBJECT_METADATA_REQUESTS, 0), + with(OBJECT_LIST_REQUEST, FILESTATUS_DIR_PROBE_L)); + } + + @Test + public void testGetContentSummaryRoot() throws Throwable { + describe("getContentSummary on Root"); + S3AFileSystem fs = getFileSystem(); + + Path root = new Path("/"); + verifyMetrics(() -> getContentSummary(root), + with(INVOCATION_GET_CONTENT_SUMMARY, 1)); + } + + @Test + public void testGetContentSummaryDir() throws Throwable { + describe("getContentSummary on test dir with children"); + S3AFileSystem fs = getFileSystem(); + Path baseDir = methodPath(); + Path childDir = new Path(baseDir, "subdir/child"); + touch(fs, childDir); + + final ContentSummary summary = verifyMetrics( + () -> getContentSummary(baseDir), + with(INVOCATION_GET_CONTENT_SUMMARY, 1), + with(AUDIT_SPAN_CREATION, 1), + whenRaw(FILE_STATUS_FILE_PROBE // look at path to see if it is a file + .plus(LIST_OPERATION) // it is not: so LIST + .plus(LIST_OPERATION))); // and a LIST on the child dir + Assertions.assertThat(summary.getDirectoryCount()) + .as("Summary " + summary) + .isEqualTo(2); + Assertions.assertThat(summary.getFileCount()) + .as("Summary " + summary) + .isEqualTo(1); + } + + @Test + public void testGetContentMissingPath() throws Throwable { + describe("getContentSummary on a missing path"); + Path baseDir = methodPath(); + verifyMetricsIntercepting(FileNotFoundException.class, + "", () -> getContentSummary(baseDir), + with(INVOCATION_GET_CONTENT_SUMMARY, 1), + with(AUDIT_SPAN_CREATION, 1), + whenRaw(FILE_STATUS_FILE_PROBE + .plus(FILE_STATUS_FILE_PROBE) + .plus(LIST_OPERATION) + .plus(LIST_OPERATION))); + } + + private ContentSummary getContentSummary(final Path baseDir) + throws IOException { + S3AFileSystem fs = getFileSystem(); + return fs.getContentSummary(baseDir); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java new file mode 100644 index 0000000000000..639e1dddbae74 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.fs.s3a.performance;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUEST;
+import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_METADATA_REQUESTS;
+import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILESTATUS_DIR_PROBE_L;
+import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILESTATUS_FILE_PROBE_H;
+import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILESTATUS_FILE_PROBE_L;
+
+/**
+ * Use metrics to assert about the cost of mkdirs.
+ * Parameterized on directory marker keep vs delete.
+ */
+@RunWith(Parameterized.class)
+public class ITestS3AMkdirCost extends AbstractS3ACostTest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestS3AMkdirCost.class);
+
+  /**
+   * Parameterization.
+   */
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{
+        {"keep-markers", true},
+        {"delete-markers", false}
+    });
+  }
+
+  public ITestS3AMkdirCost(final String name,
+      final boolean keepMarkers) {
+    super(false, keepMarkers, false);
+  }
+
+  /**
+   * Common operation which should be as low cost as possible.
+   */
+  @Test
+  public void testMkdirOverDir() throws Throwable {
+    describe("create a dir over a dir");
+    S3AFileSystem fs = getFileSystem();
+    // create base dir with marker
+    Path baseDir = dir(methodPath());
+
+    // mkdirs over the existing dir; only assert on HEAD/LIST IO
+    verifyMetrics(() -> fs.mkdirs(baseDir),
+        // full probe on dest plus list only on parent.
+        with(OBJECT_METADATA_REQUESTS,
+            0),
+        with(OBJECT_LIST_REQUEST, FILESTATUS_DIR_PROBE_L));
+  }
+
+  /**
+   * Mkdir with a parent dir will check dest (list+HEAD)
+   * then do a list on the parent to find the marker.
+   * Once the dir is created, creating a sibling will
+   * have the same cost.
+   */
+  @Test
+  public void testMkdirWithParent() throws Throwable {
+    describe("create a dir under a dir with a parent");
+    S3AFileSystem fs = getFileSystem();
+    // create base dir with marker
+    Path baseDir = dir(methodPath());
+    Path childDir = new Path(baseDir, "child");
+
+    // create the child; only assert on HEAD/GET IO
+    verifyMetrics(() -> fs.mkdirs(childDir),
+        // full probe on dest plus list only on parent.
+        with(OBJECT_METADATA_REQUESTS,
+            FILESTATUS_FILE_PROBE_H),
+
+        with(OBJECT_LIST_REQUEST,
+            FILESTATUS_FILE_PROBE_L + 2 * FILESTATUS_DIR_PROBE_L));
+
+    // now include a sibling; cost will be the same.
+    Path sibling = new Path(baseDir, "sibling");
+    verifyMetrics(() -> fs.mkdirs(sibling),
+        // full probe on dest plus list only on parent.
+        with(OBJECT_METADATA_REQUESTS,
+            FILESTATUS_FILE_PROBE_H),
+
+        with(OBJECT_LIST_REQUEST,
+            FILESTATUS_FILE_PROBE_L + 2 * FILESTATUS_DIR_PROBE_L));
+  }
+
+  /**
+   * Mkdir with a grandparent dir will check dest (list+HEAD)
+   * then do a list + HEAD on the parent and ultimately find the
+   * marker with a list of the parent.
+   * That's three list calls and two head requests.
+   * Once the dir is created, creating a sibling will
+   * cost less as the list of the parent path will find
+   * a directory.
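The mkdir cost assertions in this class are built by composing per-probe HEAD and LIST counts, in the same spirit as the OperationCost.plus() calls used in the content-summary tests earlier. A toy, self-contained sketch of such a cost model follows; the numbers are purely illustrative and do not correspond to the real probe constants.

    public final class CostSketch {

      /** Simplified stand-in for the suite's OperationCost: HEAD and LIST counts. */
      static final class Cost {
        final int head;
        final int list;

        Cost(int head, int list) {
          this.head = head;
          this.list = list;
        }

        Cost plus(Cost other) {
          return new Cost(head + other.head, list + other.list);
        }

        @Override
        public String toString() {
          return "HEAD=" + head + ", LIST=" + list;
        }
      }

      public static void main(String[] args) {
        // illustrative values only; the real constants vary with marker policy
        Cost headProbe = new Cost(1, 0);
        Cost listProbe = new Cost(0, 1);

        // e.g. a destination probe plus list checks on two parent directories
        Cost expected = headProbe.plus(listProbe).plus(listProbe).plus(listProbe);
        System.out.println("expected cost: " + expected);
      }
    }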
+ */ + @Test + public void testMkdirWithGrandparent() throws Throwable { + describe("create a dir under a dir with a parent"); + S3AFileSystem fs = getFileSystem(); + // create base dir with marker + Path baseDir = dir(methodPath()); + Path subDir = new Path(baseDir, "child/grandchild"); + + // create the child; only assert on HEAD/GET IO + verifyMetrics(() -> fs.mkdirs(subDir), + // full probe on dest plus list only on parent. + with(OBJECT_METADATA_REQUESTS, + 2 * FILESTATUS_FILE_PROBE_H), + + with(OBJECT_LIST_REQUEST, + 3 * FILESTATUS_DIR_PROBE_L)); + + + // now include a sibling; cost will be less because + // now the immediate parent check will succeed on the list call. + Path sibling = new Path(baseDir, "child/sibling"); + + verifyMetrics(() -> fs.mkdirs(sibling), + + // full probe on dest plus list only on parent. + with(OBJECT_METADATA_REQUESTS, + FILESTATUS_FILE_PROBE_H), + + with(OBJECT_LIST_REQUEST, + FILESTATUS_FILE_PROBE_L + 2 * FILESTATUS_DIR_PROBE_L)); + } + + + /** + * When calling mkdir over a file, the list happens first, so + * is always billed for. + * @throws Throwable failure. + */ + @Test + public void testMkdirOverFile() throws Throwable { + describe("create a dir over a file; expect dir and file probes"); + S3AFileSystem fs = getFileSystem(); + // create base dir with marker + Path baseDir = dir(methodPath()); + Path childDir = new Path(baseDir, "child"); + touch(fs, childDir); + + // create the child; only assert on HEAD/GET IO + verifyMetricsIntercepting( + FileAlreadyExistsException.class, "", + () -> fs.mkdirs(childDir), + // full probe on dest plus list only on parent. + with(OBJECT_METADATA_REQUESTS, + FILESTATUS_FILE_PROBE_H), + with(OBJECT_LIST_REQUEST, FILESTATUS_DIR_PROBE_L)); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java index 3f03abbabc3cc..72e51ee3b9958 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java @@ -41,6 +41,7 @@ import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUEST; import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_METADATA_REQUESTS; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** @@ -169,7 +170,7 @@ public T exec( LOG.info("{}", text); LOG.info("state {}", this.toString()); LOG.info("probes {}", expected); - LOG.info("IOStatistics {}", ioStatistics); + LOG.info("IOStatistics {}", ioStatisticsToPrettyString(ioStatistics)); for (ExpectedProbe ed : expected) { ed.verify(this, text); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardFsck.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardFsck.java index c4e903df7390f..28eb52ff41ada 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardFsck.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardFsck.java @@ -44,7 +44,6 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; import static 
org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.awaitFileStatus; import static org.apache.hadoop.fs.s3a.S3ATestUtils.metadataStorePersistsAuthoritativeBit; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; @@ -207,7 +206,6 @@ public void testIDetectDirInS3FileInMs() throws Exception { try { // create a file with guarded fs mkdirs(cwd); - awaitFileStatus(guardedFs, cwd); // modify the cwd metadata and set that it's not a directory final S3AFileStatus newParentFile = MetadataStoreTestBase .basicFileStatus(cwd, 1, false, 1); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java index 798bdd693081f..ecddbbd820872 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java @@ -272,35 +272,28 @@ public void testUploads() throws Throwable { Path path = path(UPLOAD_PREFIX + "/" + UPLOAD_NAME); describe("Cleaning up any leftover uploads from previous runs."); - // 1. Make sure key doesn't already exist - clearAnyUploads(fs, path); + final String key = fs.pathToKey(path); + try { + // 1. Make sure key doesn't already exist + clearAnyUploads(fs, path); - // 2. Confirm no uploads are listed via API - assertNoUploadsAt(fs, path.getParent()); + // 2. Confirm no uploads are listed via API + assertNoUploadsAt(fs, path.getParent()); - // 3. Confirm no uploads are listed via CLI - describe("Confirming CLI lists nothing."); - assertNumUploads(path, 0); + // 3. Confirm no uploads are listed via CLI + describe("Confirming CLI lists nothing."); + assertNumUploads(path, 0); - // 4. Create a upload part - describe("Uploading single part."); - createPartUpload(fs, fs.pathToKey(path), 128, 1); + // 4. Create a upload part + describe("Uploading single part."); + createPartUpload(fs, key, 128, 1); - try { - // 5. Confirm it exists via API.. - LambdaTestUtils.eventually(5000, /* 5 seconds until failure */ - 1000, /* one second retry interval */ - () -> { - assertEquals("Should be one upload", 1, countUploadsAt(fs, path)); - }); + assertEquals("Should be one upload", 1, countUploadsAt(fs, path)); // 6. Confirm part exists via CLI, direct path and parent path describe("Confirming CLI lists one part"); - LambdaTestUtils.eventually(5000, 1000, - () -> { assertNumUploads(path, 1); }); - LambdaTestUtils.eventually(5000, 1000, - () -> { assertNumUploads(path.getParent(), 1); }); - + assertNumUploads(path, 1); + assertNumUploads(path.getParent(), 1); // 7. Use CLI to delete part, assert it worked describe("Deleting part via CLI"); assertNumDeleted(fs, path, 1); @@ -331,15 +324,15 @@ public void testUploadListByAge() throws Throwable { // 2. Create a upload part describe("Uploading single part."); - createPartUpload(fs, fs.pathToKey(path), 128, 1); + final String key = fs.pathToKey(path); + createPartUpload(fs, key, 128, 1); + //try (AuditSpan span = fs.startOperation("multipart", key, null)) { try { + // 3. Confirm it exists via API.. may want to wrap with // LambdaTestUtils.eventually() ? - LambdaTestUtils.eventually(5000, 1000, - () -> { - assertEquals("Should be one upload", 1, countUploadsAt(fs, path)); - }); + assertEquals("Should be one upload", 1, countUploadsAt(fs, path)); // 4. 
Confirm part does appear in listing with long age filter describe("Confirming CLI older age doesn't list"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java index 2b3043f39ac73..3e00917a2dbe5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java @@ -128,8 +128,7 @@ protected Configuration createScaleConfiguration() { @Test public void test_010_CreateHugeFile() throws IOException { - assertFalse("Please run this test sequentially to avoid timeouts" + - " and bandwidth problems", isParallelExecution()); + long filesizeMB = filesize / _1MB; // clean up from any previous attempts diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java index 61f6ef3c76473..d5e4788036ebe 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java @@ -28,7 +28,11 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.WriteOperationHelper; +import org.apache.hadoop.fs.s3a.api.RequestFactory; import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.util.functional.RemoteIterators; import org.junit.Test; import org.assertj.core.api.Assertions; @@ -39,7 +43,7 @@ import java.io.InputStream; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -47,13 +51,17 @@ import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP; import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; import static org.apache.hadoop.fs.s3a.Statistic.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupCounterStatistic; import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; -import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST; import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; @@ -164,6 +172,13 @@ public void testListOperations() throws Throwable { 
} } + /** + * This is quite a big test; it PUTs up a number of + * files and then lists them in a filesystem set to ask for a small number + * of files on each listing. + * The standard listing API calls are all used, and then + * delete() is invoked to verify that paged deletion works correctly too. + */ @Test public void testMultiPagesListingPerformanceAndCorrectness() throws Throwable { @@ -180,45 +195,59 @@ public void testMultiPagesListingPerformanceAndCorrectness() .isEqualTo(0); final Configuration conf = getConfigurationWithConfiguredBatchSize(batchSize); - removeBaseAndBucketOverrides(conf, S3_METADATA_STORE_IMPL); - final InputStream im = new InputStream() { - @Override - public int read() throws IOException { - return -1; - } - }; + + removeBaseAndBucketOverrides(conf, + S3_METADATA_STORE_IMPL, + DIRECTORY_MARKER_POLICY); + // force directory markers = keep to save delete requests on every + // file created. + conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_KEEP); + S3AFileSystem fs = (S3AFileSystem) FileSystem.get(dir.toUri(), conf); + final List originalListOfFiles = new ArrayList<>(); - List> putObjectRequests = new ArrayList<>(); ExecutorService executorService = Executors .newFixedThreadPool(numOfPutThreads); NanoTimer uploadTimer = new NanoTimer(); - S3AFileSystem fs = (S3AFileSystem) FileSystem.get(dir.toUri(), conf); try { assume("Test is only for raw fs", !fs.hasMetadataStore()); fs.create(dir); + + // create a span for the write operations + final AuditSpan span = fs.getAuditSpanSource() + .createSpan(OBJECT_PUT_REQUESTS.getSymbol(), dir.toString(), null); + final WriteOperationHelper writeOperationHelper + = fs.getWriteOperationHelper(); + final RequestFactory requestFactory + = writeOperationHelper.getRequestFactory(); + List> futures = + new ArrayList<>(numOfPutRequests); + for (int i=0; i - fs.getWriteOperationHelper().putObject(put)); + PutObjectRequest put = requestFactory + .newPutObjectRequest(fs.pathToKey(file), om, + new FailingInputStream()); + futures.add(submit(executorService, () -> + writeOperationHelper.putObject(put))); } - executorService.invokeAll(putObjectRequests); + LOG.info("Waiting for PUTs to complete"); + waitForCompletion(futures); uploadTimer.end("uploading %d files with a parallelism of %d", numOfPutRequests, numOfPutThreads); RemoteIterator resIterator = fs.listFiles(dir, true); List listUsingListFiles = new ArrayList<>(); NanoTimer timeUsingListFiles = new NanoTimer(); - while(resIterator.hasNext()) { - listUsingListFiles.add(resIterator.next().getPath().toString()); - Thread.sleep(eachFileProcessingTime); - } + RemoteIterators.foreach(resIterator, st -> { + listUsingListFiles.add(st.getPath().toString()); + sleep(eachFileProcessingTime); + }); + LOG.info("Listing Statistics: {}", ioStatisticsToPrettyString( + retrieveIOStatistics(resIterator))); + timeUsingListFiles.end("listing %d files using listFiles() api with " + "batch size of %d including %dms of processing time" + " for each file", @@ -226,7 +255,7 @@ public int read() throws IOException { Assertions.assertThat(listUsingListFiles) .describedAs("Listing results using listFiles() must" + - "match with original list of files") + " match with original list of files") .hasSameElementsAs(originalListOfFiles) .hasSize(numOfPutRequests); List listUsingListStatus = new ArrayList<>(); @@ -234,7 +263,7 @@ public int read() throws IOException { FileStatus[] fileStatuses = fs.listStatus(dir); for(FileStatus fileStatus : fileStatuses) { 
listUsingListStatus.add(fileStatus.getPath().toString()); - Thread.sleep(eachFileProcessingTime); + sleep(eachFileProcessingTime); } timeUsingListStatus.end("listing %d files using listStatus() api with " + "batch size of %d including %dms of processing time" + @@ -247,12 +276,12 @@ public int read() throws IOException { .hasSize(numOfPutRequests); // Validate listing using listStatusIterator(). NanoTimer timeUsingListStatusItr = new NanoTimer(); - RemoteIterator lsItr = fs.listStatusIterator(dir); List listUsingListStatusItr = new ArrayList<>(); - while (lsItr.hasNext()) { - listUsingListStatusItr.add(lsItr.next().getPath().toString()); - Thread.sleep(eachFileProcessingTime); - } + RemoteIterator lsItr = fs.listStatusIterator(dir); + RemoteIterators.foreach(lsItr, st -> { + listUsingListStatusItr.add(st.getPath().toString()); + sleep(eachFileProcessingTime); + }); timeUsingListStatusItr.end("listing %d files using " + "listStatusIterator() api with batch size of %d " + "including %dms of processing time for each file", @@ -265,24 +294,68 @@ public int read() throws IOException { // now validate the statistics returned by the listing // to be non-null and containing list and continue counters. IOStatistics lsStats = retrieveIOStatistics(lsItr); - String statsReport = ioStatisticsToString(lsStats); + String statsReport = ioStatisticsToPrettyString(lsStats); LOG.info("Listing Statistics: {}", statsReport); verifyStatisticCounterValue(lsStats, OBJECT_LIST_REQUEST, 1); long continuations = lookupCounterStatistic(lsStats, OBJECT_CONTINUE_LIST_REQUEST); // calculate expected #of continuations - int expectedContinuations = numOfPutRequests / batchSize -1; + int expectedContinuations = numOfPutRequests / batchSize - 1; Assertions.assertThat(continuations) .describedAs("%s in %s", OBJECT_CONTINUE_LIST_REQUEST, statsReport) .isEqualTo(expectedContinuations); + + List listUsingListLocatedStatus = new ArrayList<>(); + + RemoteIterator it = fs.listLocatedStatus(dir); + RemoteIterators.foreach(it, st -> { + listUsingListLocatedStatus.add(st.getPath().toString()); + sleep(eachFileProcessingTime); + }); + final IOStatistics llsStats = retrieveIOStatistics(it); + LOG.info("Listing Statistics: {}", ioStatisticsToPrettyString( + llsStats)); + verifyStatisticCounterValue(llsStats, OBJECT_CONTINUE_LIST_REQUEST, + expectedContinuations); + Assertions.assertThat(listUsingListLocatedStatus) + .describedAs("Listing results using listLocatedStatus() must" + + "match with original list of files") + .hasSameElementsAs(originalListOfFiles); + // delete in this FS so S3Guard is left out of it. + // and so that the incremental listing is tested through + // the delete operation. + fs.delete(dir, true); } finally { executorService.shutdown(); - // delete in this FS so S3Guard is left out of it. + // in case the previous delete was not reached. fs.delete(dir, true); + LOG.info("FS statistics {}", + ioStatisticsToPrettyString(fs.getIOStatistics())); fs.close(); } } + /** + * Input stream which always returns -1. + */ + private static final class FailingInputStream extends InputStream { + @Override + public int read() throws IOException { + return -1; + } + } + + /** + * Sleep briefly. + * @param eachFileProcessingTime time to sleep. 
+ */ + private void sleep(final int eachFileProcessingTime) { + try { + Thread.sleep(eachFileProcessingTime); + } catch (InterruptedException ignored) { + } + } + private Configuration getConfigurationWithConfiguredBatchSize(int batchSize) { Configuration conf = new Configuration(getFileSystem().getConf()); S3ATestUtils.disableFilesystemCaching(conf); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java index f683a0a008caf..678e5d482d1a5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.s3a.impl.ListingOperationCallbacks; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.store.audit.AuditSpan; /** * Stub implementation of {@link ListingOperationCallbacks}. @@ -39,7 +40,8 @@ public class MinimalListingOperationCallbacks @Override public CompletableFuture listObjectsAsync( final S3ListRequest request, - final DurationTrackerFactory trackerFactory) throws IOException { + final DurationTrackerFactory trackerFactory, AuditSpan span) + throws IOException { return null; } @@ -47,7 +49,8 @@ public CompletableFuture listObjectsAsync( public CompletableFuture continueListObjectsAsync( final S3ListRequest request, final S3ListResult prevResult, - final DurationTrackerFactory trackerFactory) throws IOException { + final DurationTrackerFactory trackerFactory, AuditSpan span) + throws IOException { return null; } @@ -60,7 +63,8 @@ public S3ALocatedFileStatus toLocatedFileStatus( @Override public S3ListRequest createListObjectsRequest( String key, - String delimiter) { + String delimiter, + AuditSpan span) { return null; } diff --git a/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml index a90edbe24fc4a..21e30b6d66cb1 100644 --- a/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml +++ b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml @@ -57,6 +57,12 @@ Let's postpone existence checks to the first IO operation + + fs.s3a.bucket.landsat-pds.audit.add.referrer.header + false + Do not add the referrer header to landsat operations + + s3guard.null @@ -166,6 +172,13 @@ 0 + + + fs.s3a.audit.reject.out.of.span.operations + true + + 2.0.0-M21 From 3abb9c45691e5be3500f31327010054006753be5 Mon Sep 17 00:00:00 2001 From: Konstantin V Shvachko Date: Thu, 27 May 2021 18:44:27 -0700 Subject: [PATCH 364/666] HDFS-16040. RpcQueueTime metric counts requeued calls as unique events. Contributed by Simbarashe Dzinamarira. 
--- .../java/org/apache/hadoop/ipc/Server.java | 1 + .../ha/TestConsistentReadsObserver.java | 54 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index d37e4a1b24b6a..77d580e227fd3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -2954,6 +2954,7 @@ public void run() { */ // Re-queue the call and continue requeueCall(call); + call = null; continue; } if (LOG.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index 854027a116497..18f987db97481 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY; +import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -28,6 +30,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -48,6 +51,7 @@ import org.apache.hadoop.ipc.RpcScheduler; import org.apache.hadoop.ipc.Schedulable; import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.After; @@ -419,6 +423,56 @@ public void testMsyncFileContext() throws Exception { } } + @Test + public void testRpcQueueTimeNumOpsMetrics() throws Exception { + // 0 == not completed, 1 == succeeded, -1 == failed + AtomicInteger readStatus = new AtomicInteger(0); + + // Making an uncoordinated call, which initialize the proxy + // to Observer node. + dfs.getClient().getHAServiceState(); + dfs.mkdir(testPath, FsPermission.getDefault()); + assertSentTo(0); + + Thread reader = new Thread(new Runnable() { + @Override + public void run() { + try { + // this read will block until roll and tail edits happen. + dfs.getFileStatus(testPath); + readStatus.set(1); + } catch (IOException e) { + e.printStackTrace(); + readStatus.set(-1); + } + } + }); + + reader.start(); + // the reader is still blocking, not succeeded yet. + assertEquals(0, readStatus.get()); + dfsCluster.rollEditLogAndTail(0); + // wait a while for all the change to be done + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return readStatus.get() != 0; + } + }, 100, 10000); + // the reader should have succeed. 
+ assertEquals(1, readStatus.get()); + + final int observerIdx = 2; + NameNode observerNN = dfsCluster.getNameNode(observerIdx); + MetricsRecordBuilder rpcMetrics = + getMetrics("RpcActivityForPort" + + observerNN.getNameNodeAddress().getPort()); + long rpcQueueTimeNumOps = getLongCounter("RpcQueueTimeNumOps", rpcMetrics); + long rpcProcessingTimeNumOps = getLongCounter("RpcProcessingTimeNumOps", + rpcMetrics); + assertEquals(rpcQueueTimeNumOps, rpcProcessingTimeNumOps); + } + private void assertSentTo(int nnIdx) throws IOException { assertTrue("Request was not sent to the expected namenode " + nnIdx, HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx)); From a613feb265b78adab80e835c7255b4cf8681aeb6 Mon Sep 17 00:00:00 2001 From: zhengchenyu Date: Sat, 29 May 2021 03:21:37 +0800 Subject: [PATCH 365/666] MAPREDUCE-7287. Distcp will delete exists file , If we use "-delete and -update" options and distcp file. (#2852) Contributed by zhengchenyu --- .../hadoop/tools/mapred/CopyCommitter.java | 6 +- .../contract/AbstractContractDistCpTest.java | 56 +++++++++++++++++++ .../tools/mapred/TestCopyCommitter.java | 45 +++++++++++++++ .../hadoop/tools/util/TestDistCpUtils.java | 9 +++ 4 files changed, 111 insertions(+), 5 deletions(-) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java index 33ab3ee4eba81..2272781f72476 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java @@ -553,10 +553,6 @@ private Path listTargetFiles(final Configuration conf, conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH)); List targets = new ArrayList<>(1); targets.add(targetFinalPath); - Path resultNonePath = Path.getPathWithoutSchemeAndAuthority(targetFinalPath) - .toString().startsWith(DistCpConstants.HDFS_RESERVED_RAW_DIRECTORY_NAME) - ? 
DistCpConstants.RAW_NONE_PATH - : DistCpConstants.NONE_PATH; // // Set up options to be the same from the CopyListing.buildListing's // perspective, so to collect similar listings as when doing the copy @@ -568,7 +564,7 @@ private Path listTargetFiles(final Configuration conf, conf.getBoolean(DistCpConstants.CONF_LABEL_USE_ITERATOR, false); LOG.info("Scanning destination directory {} with thread count: {}", targetFinalPath, threads); - DistCpOptions options = new DistCpOptions.Builder(targets, resultNonePath) + DistCpOptions options = new DistCpOptions.Builder(targets, targetFinalPath) .withOverwrite(overwrite) .withSyncFolder(syncFolder) .withNumListstatusThreads(threads) diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index fab14d138b551..159338f0d441f 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -709,4 +709,60 @@ private Job runDistCpDirectWrite(final Path srcDir, final Path destDir) Collections.singletonList(srcDir), destDir) .withDirectWrite(true))); } + + @Test + public void testDistCpWithFile() throws Exception { + describe("Distcp only file"); + + Path source = new Path(remoteDir, "file"); + Path dest = new Path(localDir, "file"); + dest = localFS.makeQualified(dest); + + mkdirs(remoteFS, remoteDir); + mkdirs(localFS, localDir); + + int len = 4; + int base = 0x40; + byte[] block = dataset(len, base, base + len); + ContractTestUtils.createFile(remoteFS, source, true, block); + verifyPathExists(remoteFS, "", source); + verifyPathExists(localFS, "", localDir); + + DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + dest.toString(), null, conf); + + Assertions + .assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) + .describedAs("files").hasSize(1); + verifyFileContents(localFS, dest, block); + } + + @Test + public void testDistCpWithUpdateExistFile() throws Exception { + describe("Now update an exist file."); + + Path source = new Path(remoteDir, "file"); + Path dest = new Path(localDir, "file"); + dest = localFS.makeQualified(dest); + + mkdirs(remoteFS, remoteDir); + mkdirs(localFS, localDir); + + int len = 4; + int base = 0x40; + byte[] block = dataset(len, base, base + len); + byte[] destBlock = dataset(len, base, base + len + 1); + ContractTestUtils.createFile(remoteFS, source, true, block); + ContractTestUtils.createFile(localFS, dest, true, destBlock); + + verifyPathExists(remoteFS, "", source); + verifyPathExists(localFS, "", dest); + DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + dest.toString(), "-delete -update", conf); + + Assertions.assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) + .hasSize(1); + verifyFileContents(localFS, dest, block); + } + } \ No newline at end of file diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java index 685f030e15ea0..62940f64b3488 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java @@ -265,6 
+265,51 @@ public void testDeleteMissing() throws IOException { } } + @Test + public void testDeleteMissingWithOnlyFile() throws IOException { + TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config); + JobContext jobContext = new JobContextImpl(taskAttemptContext + .getConfiguration(), taskAttemptContext.getTaskAttemptID().getJobID()); + Configuration conf = jobContext.getConfiguration(); + + String sourceBase; + String targetBase; + FileSystem fs = null; + try { + OutputCommitter committer = new CopyCommitter(null, taskAttemptContext); + fs = FileSystem.get(conf); + sourceBase = TestDistCpUtils.createTestSetupWithOnlyFile(fs, + FsPermission.getDefault()); + targetBase = TestDistCpUtils.createTestSetupWithOnlyFile(fs, + FsPermission.getDefault()); + + final DistCpOptions options = new DistCpOptions.Builder( + Collections.singletonList(new Path(sourceBase)), new Path(targetBase)) + .withSyncFolder(true).withDeleteMissing(true).build(); + options.appendToConf(conf); + final DistCpContext context = new DistCpContext(options); + + CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS); + Path listingFile = new Path(sourceBase); + listing.buildListing(listingFile, context); + + conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase); + conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase); + + committer.commitJob(jobContext); + verifyFoldersAreInSync(fs, targetBase, sourceBase); + verifyFoldersAreInSync(fs, sourceBase, targetBase); + + //Test for idempotent commit + committer.commitJob(jobContext); + verifyFoldersAreInSync(fs, targetBase, sourceBase); + verifyFoldersAreInSync(fs, sourceBase, targetBase); + } finally { + TestDistCpUtils.delete(fs, "/tmp1"); + conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false"); + } + } + // for HDFS-14621, should preserve times after -delete @Test public void testPreserveTimeWithDeleteMiss() throws IOException { diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java index 7c9a5e30948b3..7d17167c3e8df 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java @@ -1361,6 +1361,15 @@ private static String getBase(String base) { return base + "/" + location; } + public static String createTestSetupWithOnlyFile(FileSystem fs, + FsPermission perm) throws IOException { + String location = String.valueOf(rand.nextLong()); + fs.mkdirs(new Path("/tmp1/" + location)); + fs.setPermission(new Path("/tmp1/" + location), perm); + createFile(fs, new Path("/tmp1/" + location + "/file")); + return "/tmp1/" + location + "/file"; + } + public static void delete(FileSystem fs, String path) { try { if (fs != null) { From 54f39a002e89b6cdfc88ad95ada7b09b06c644ac Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Sat, 29 May 2021 23:04:48 +0900 Subject: [PATCH 366/666] HDFS-16046. TestBalancerProcedureScheduler and TestDistCpProcedure timeouts. 
(#3060) Reviewed-by: Ayush Saxena --- .../org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java | 2 +- .../fedbalance/procedure/TestBalanceProcedureScheduler.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java index 8e282d1eb3380..34b2d7b1d521f 100644 --- a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/TestDistCpProcedure.java @@ -98,7 +98,7 @@ public static void afterClass() { } } - @Test(timeout = 30000) + @Test(timeout = 90000) public void testSuccessfulDistCpProcedure() throws Exception { String testRoot = nnUri + "/user/foo/testdir." + getMethodName(); DistributedFileSystem fs = diff --git a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java index 7a2b449ce48c1..5a0798b8331db 100644 --- a/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/test/java/org/apache/hadoop/tools/fedbalance/procedure/TestBalanceProcedureScheduler.java @@ -305,7 +305,7 @@ public void testJobSerializeAndDeserialize() throws Exception { /** * Test scheduler crashes and recovers. */ - @Test(timeout = 60000) + @Test(timeout = 180000) public void testSchedulerDownAndRecoverJob() throws Exception { BalanceProcedureScheduler scheduler = new BalanceProcedureScheduler(CONF); scheduler.init(true); From c26f04a68fd868495262002cbf97f841eef9effd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E7=BB=8F=E7=BA=B6?= Date: Sun, 30 May 2021 18:50:03 +0800 Subject: [PATCH 367/666] HDFS-15973. RBF: Add permission check before doing router federation rename. Contributed by Jinglun. 
Reviewed-by: Inigo Goiri --- .../router/RouterFederationRename.java | 72 ++++- .../src/site/markdown/HDFSRouterFederation.md | 5 +- .../router/TestRouterFederationRename.java | 183 +++---------- .../TestRouterFederationRenameBase.java | 203 +++++++++++++++ ...stRouterFederationRenameInKerberosEnv.java | 49 +++- .../TestRouterFederationRenamePermission.java | 246 ++++++++++++++++++ 6 files changed, 606 insertions(+), 152 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java index d2bf989f1f46e..aafb685b88626 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java @@ -21,7 +21,11 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs; @@ -55,7 +59,11 @@ import org.slf4j.LoggerFactory; /** - * Rename across router based federation namespaces. + * Rename across router federation namespaces based on federation balance. Both + * the src and the dst coming from different namespaces need to have only one + * destination. Snapshot paths are not allowed. + * Users need write privilege of both src parent and dst parent to do router + * federation rename. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -99,6 +107,8 @@ boolean routerFedRename(final String src, final String dst, } RemoteLocation srcLoc = srcLocations.get(0); RemoteLocation dstLoc = dstLocations.get(0); + checkSnapshotPath(srcLoc, dstLoc); + checkPermission(srcLoc, dstLoc); UserGroupInformation routerUser = UserGroupInformation.getLoginUser(); @@ -131,6 +141,66 @@ boolean routerFedRename(final String src, final String dst, } } + /** + * Check router federation rename permission. + */ + private void checkPermission(RemoteLocation src, RemoteLocation dst) + throws IOException { + try { + if (UserGroupInformation.isSecurityEnabled()) { + // In security mode, check permission as remote user proxy by router + // user. + String remoteUserName = NameNode.getRemoteUser().getShortUserName(); + UserGroupInformation proxyUser = UserGroupInformation + .createProxyUser(remoteUserName, + UserGroupInformation.getLoginUser()); + proxyUser.doAs((PrivilegedExceptionAction) () -> { + checkRenamePermission(src, dst); + return null; + }); + } else { + // In simple mode, check permission as remote user directly. 
+ checkRenamePermission(src, dst); + } + } catch (AccessControlException e) { + throw new AccessControlException( + "Permission denied rename " + src.getSrc() + "(" + src + ") to " + dst + .getSrc() + "(" + dst + ") Reason=" + e.getMessage()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException( + "Router Federation Rename is interrupted while checking permission."); + } + } + + private void checkRenamePermission(RemoteLocation srcLoc, + RemoteLocation dstLoc) throws IOException { + // check src path permission. + Path srcPath = + new Path("hdfs://" + srcLoc.getNameserviceId() + srcLoc.getDest()); + srcPath.getFileSystem(conf).access(srcPath.getParent(), FsAction.WRITE); + // check dst path permission. + Path dstPath = + new Path("hdfs://" + dstLoc.getNameserviceId() + dstLoc.getDest()); + dstPath.getFileSystem(conf).access(dstPath.getParent(), FsAction.WRITE); + } + + static void checkSnapshotPath(RemoteLocation src, RemoteLocation dst) + throws AccessControlException { + if (src.getDest() + .contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) { + throw new AccessControlException( + "Router federation rename can't rename snapshot path. src=" + src + .getSrc() + "(" + src + ")"); + } + if (dst.getDest() + .contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) { + throw new AccessControlException( + "Router federation rename can't rename snapshot path. dst=" + dst + .getSrc() + "(" + dst + ")"); + } + } + /** * Build router federation rename job moving data from src to dst. * @param srcNs the source namespace id. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index d7838c75f3804..151289fa7f7d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -514,7 +514,10 @@ More metrics info can see [RBF Metrics](../../hadoop-project-dist/hadoop-common/ Router Federation Rename ------- -Enable Router to rename across namespaces. Currently the router federation rename is implemented by distcp. We must set the rpc timeout high enough so it won't timeout. +Enable Router to rename across namespaces. Currently it is implemented based on [HDFS Federation Balance](../../../hadoop-federation-balance/HDFSFederationBalance.md) and has some limits comparing with normal rename. +1. It is much slower than the normal rename so need a longer RPC timeout configuration. See `ipc.client.rpc-timeout.ms` and its description for more information about RPC timeout. +2. It doesn't support snapshot path. +3. It doesn't support to rename path with multiple destinations. 
| Property | Default | Description| |:---- |:---- |:---- | diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index a9a17b3ef5025..3f73d951bcf68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -19,38 +19,32 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; import static org.apache.hadoop.test.GenericTestUtils.getMethodName; -import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.verify; import java.io.IOException; -import java.util.Arrays; import java.util.List; -import java.util.Random; +import java.util.Arrays; +import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; -import org.apache.hadoop.hdfs.server.federation.MockResolver; -import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.tools.fedbalance.DistCpProcedure; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -60,137 +54,53 @@ /** * Basic tests of router federation rename. Rename across namespaces. */ -public class TestRouterFederationRename { - - private static final int NUM_SUBCLUSTERS = 2; - private static final int NUM_DNS = 6; +public class TestRouterFederationRename extends TestRouterFederationRenameBase { - /** Federated HDFS cluster. */ - private static MiniRouterDFSCluster cluster; - - /** Random Router for this federated cluster. */ - private RouterContext router; + public static class MockGroupsMapping implements + GroupMappingServiceProvider { - /** Random nameservice in the federated cluster. 
*/ - private String ns; - /** Filesystem interface to the Router. */ - private FileSystem routerFS; - /** Filesystem interface to the Namenode. */ - private FileSystem nnFS; - /** File in the Namenode. */ - private String nnFile; + @Override + public List getGroups(String user) { + return Arrays.asList(user+"_group"); + } - @BeforeClass - public static void globalSetUp() throws Exception { - Configuration namenodeConf = new Configuration(); - namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, - true); - cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); - cluster.setNumDatanodesPerNameservice(NUM_DNS); - cluster.addNamenodeOverrides(namenodeConf); - cluster.setIndependentDNs(); + @Override + public void cacheGroupsRefresh() { + } - Configuration conf = new Configuration(); - conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5); - cluster.addNamenodeOverrides(conf); - // Start NNs and DNs and wait until ready. - cluster.startCluster(); + @Override + public void cacheGroupsAdd(List groups) { + } - // Start routers, enable router federation rename. - String journal = "hdfs://" + cluster.getCluster().getNameNode(1) - .getClientNamenodeAddress() + "/journal"; - Configuration routerConf = new RouterConfigBuilder() - .metrics() - .rpc() - .routerRenameOption() - .set(SCHEDULER_JOURNAL_URI, journal) - .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") - .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") - .build(); - // We decrease the DN cache times to make the test faster. - routerConf.setTimeDuration( - RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); - cluster.addRouterOverrides(routerConf); - cluster.startRouters(); + @Override + public Set getGroupsSet(String user) { + return ImmutableSet.of(user+"_group"); + } + } - // Register and verify all NNs with all routers - cluster.registerNamenodes(); - cluster.waitNamenodeRegistration(); + private RouterContext router; + private FileSystem routerFS; + private MiniRouterDFSCluster cluster; - // We decrease the DN heartbeat expire interval to make them dead faster - cluster.getCluster().getNamesystem(0).getBlockManager() - .getDatanodeManager().setHeartbeatInterval(1); - cluster.getCluster().getNamesystem(1).getBlockManager() - .getDatanodeManager().setHeartbeatInterval(1); - cluster.getCluster().getNamesystem(0).getBlockManager() - .getDatanodeManager().setHeartbeatExpireInterval(3000); - cluster.getCluster().getNamesystem(1).getBlockManager() - .getDatanodeManager().setHeartbeatExpireInterval(3000); - DistCpProcedure.enableForTest(); + @BeforeClass + public static void before() throws Exception { + globalSetUp(); } @AfterClass - public static void tearDown() { - cluster.shutdown(); - DistCpProcedure.disableForTest(); + public static void after() { + tearDown(); } @Before public void testSetup() throws Exception { - - // Create mock locations - cluster.installMockLocations(); - - // Delete all files via the NNs and verify - cluster.deleteAllFiles(); - - // Create test fixtures on NN - cluster.createTestDirectoriesNamenode(); - - // Wait to ensure NN has fully created its test directories - Thread.sleep(100); - - // Random router for this test - RouterContext rndRouter = cluster.getRandomRouter(); - this.setRouter(rndRouter); - - // Create a mount that points to 2 dirs in the same ns: - // /same - // ns0 -> / - // ns0 -> /target-ns0 - for (RouterContext rc : cluster.getRouters()) { - Router r = rc.getRouter(); - MockResolver resolver = (MockResolver) r.getSubclusterResolver(); - List nss = cluster.getNameservices(); - 
String ns0 = nss.get(0); - resolver.addLocation("/same", ns0, "/"); - resolver.addLocation("/same", ns0, cluster.getNamenodePathForNS(ns0)); - } - - // Pick a namenode for this test - String ns0 = cluster.getNameservices().get(0); - this.setNs(ns0); - this.setNamenode(cluster.getNamenode(ns0, null)); - - // Create a test file on the NN - Random rnd = new Random(); - String randomFile = "testfile-" + rnd.nextInt(); - this.nnFile = - cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; - - createFile(nnFS, nnFile, 32); - verifyFileExists(nnFS, nnFile); - } - - protected void createDir(FileSystem fs, String dir) throws IOException { - fs.mkdirs(new Path(dir)); - String file = dir + "/file"; - createFile(fs, file, 32); - verifyFileExists(fs, dir); - verifyFileExists(fs, file); + setup(); + router = getRouterContext(); + routerFS = getRouterFileSystem(); + cluster = getCluster(); } - protected void testRenameDir(RouterContext testRouter, String path, + private void testRenameDir(RouterContext testRouter, String path, String renamedPath, boolean exceptionExpected, Callable call) throws IOException { createDir(testRouter.getFileSystem(), path); @@ -219,23 +129,6 @@ protected void testRenameDir(RouterContext testRouter, String path, } } - protected void setRouter(RouterContext r) throws IOException { - this.router = r; - this.routerFS = r.getFileSystem(); - } - - protected void setNs(String nameservice) { - this.ns = nameservice; - } - - protected void setNamenode(NamenodeContext nn) throws IOException { - this.nnFS = nn.getFileSystem(); - } - - protected FileSystem getRouterFileSystem() { - return this.routerFS; - } - @Test public void testSuccessfulRbfRename() throws Exception { List nss = cluster.getNameservices(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java new file mode 100644 index 0000000000000..40464923dda0e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; + +import java.io.IOException; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.MockResolver; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.tools.fedbalance.DistCpProcedure; + +/** + * Test base of router federation rename. + */ +public class TestRouterFederationRenameBase { + + static final int NUM_SUBCLUSTERS = 2; + static final int NUM_DNS = 6; + + /** Random Router for this federated cluster. */ + private MiniRouterDFSCluster.RouterContext router; + + /** Random nameservice in the federated cluster. */ + private String ns; + /** Filesystem interface to the Router. */ + private FileSystem routerFS; + /** Filesystem interface to the Namenode. */ + private FileSystem nnFS; + /** File in the Namenode. */ + private String nnFile; + + /** Federated HDFS cluster. */ + private static MiniRouterDFSCluster cluster; + + public static void globalSetUp() throws Exception { + Configuration namenodeConf = new Configuration(); + namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, + true); + namenodeConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, + TestRouterFederationRename.MockGroupsMapping.class.getName()); + cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); + cluster.setNumDatanodesPerNameservice(NUM_DNS); + cluster.addNamenodeOverrides(namenodeConf); + cluster.setIndependentDNs(); + + Configuration conf = new Configuration(); + conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5); + cluster.addNamenodeOverrides(conf); + // Start NNs and DNs and wait until ready. + cluster.startCluster(); + + // Start routers, enable router federation rename. + String journal = "hdfs://" + cluster.getCluster().getNameNode(1) + .getClientNamenodeAddress() + "/journal"; + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .routerRenameOption() + .set(SCHEDULER_JOURNAL_URI, journal) + .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") + .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") + .build(); + // We decrease the DN cache times to make the test faster. 
+ routerConf.setTimeDuration( + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + routerConf.setBoolean(DFS_ROUTER_ADMIN_ENABLE, true); + routerConf.setBoolean(DFS_PERMISSIONS_ENABLED_KEY, true); + routerConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, + TestRouterFederationRename.MockGroupsMapping.class.getName()); + cluster.addRouterOverrides(routerConf); + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + + // We decrease the DN heartbeat expire interval to make them dead faster + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + DistCpProcedure.enableForTest(); + } + + public static void tearDown() { + cluster.shutdown(); + cluster = null; + DistCpProcedure.disableForTest(); + } + + protected void setup() throws IOException, InterruptedException { + + // Create mock locations + cluster.installMockLocations(); + + // Delete all files via the NNs and verify + cluster.deleteAllFiles(); + + // Create test fixtures on NN + cluster.createTestDirectoriesNamenode(); + + // Random router for this test + MiniRouterDFSCluster.RouterContext rndRouter = cluster.getRandomRouter(); + this.setRouter(rndRouter); + + // Create a mount that points to 2 dirs in the same ns: + // /same + // ns0 -> / + // ns0 -> /target-ns0 + for (MiniRouterDFSCluster.RouterContext rc : cluster.getRouters()) { + Router r = rc.getRouter(); + MockResolver resolver = (MockResolver) r.getSubclusterResolver(); + List nss = cluster.getNameservices(); + String ns0 = nss.get(0); + resolver.addLocation("/same", ns0, "/"); + resolver.addLocation("/same", ns0, cluster.getNamenodePathForNS(ns0)); + } + + // Pick a namenode for this test + String ns0 = cluster.getNameservices().get(0); + this.setNs(ns0); + this.setNamenode(cluster.getNamenode(ns0, null)); + + // Create a test file on the NN + Random rnd = new Random(); + String randomFile = "testfile-" + rnd.nextInt(); + this.nnFile = + cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; + + createFile(nnFS, nnFile, 32); + verifyFileExists(nnFS, nnFile); + } + + protected void setRouter(MiniRouterDFSCluster.RouterContext r) throws + IOException { + this.router = r; + this.routerFS = r.getFileSystem(); + } + + protected void setNs(String nameservice) { + this.ns = nameservice; + } + + protected void setNamenode(MiniRouterDFSCluster.NamenodeContext nn) + throws IOException { + this.nnFS = nn.getFileSystem(); + } + + protected FileSystem getRouterFileSystem() { + return this.routerFS; + } + + protected void createDir(FileSystem fs, String dir) throws IOException { + fs.mkdirs(new Path(dir)); + String file = dir + "/file"; + createFile(fs, file, 32); + verifyFileExists(fs, dir); + verifyFileExists(fs, file); + } + + public MiniRouterDFSCluster getCluster() { + return cluster; + } + + public MiniRouterDFSCluster.RouterContext getRouterContext() { + return router; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java index 369508f3b22f7..28956928d4db6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java @@ -18,9 +18,11 @@ package org.apache.hadoop.hdfs.server.federation.router; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.ClientBaseWithFixes; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -31,6 +33,8 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ImpersonationProvider; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.junit.After; import org.junit.AfterClass; @@ -40,11 +44,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetAddress; import java.security.PrivilegedExceptionAction; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS; import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; @@ -116,10 +122,35 @@ public static void globalSetUp() throws Exception { baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, DFS_DATA_TRANSFER_PROTECTION_DEFAULT); baseConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true); + baseConf.setClass(HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS, + AllowUserImpersonationProvider.class, ImpersonationProvider.class); DistCpProcedure.enableForTest(); } + /** + * {@link ImpersonationProvider} that confirms the user doing the + * impersonating is the same as the user running the MiniCluster. + */ + private static class AllowUserImpersonationProvider extends Configured + implements ImpersonationProvider { + public void init(String configurationPrefix) { + // Do nothing + } + + public void authorize(UserGroupInformation user, InetAddress remoteAddress) + throws AuthorizationException { + try { + if (!user.getRealUser().getShortUserName() + .equals(UserGroupInformation.getCurrentUser().getShortUserName())) { + throw new AuthorizationException(); + } + } catch (IOException ioe) { + throw new AuthorizationException(ioe); + } + } + } + @AfterClass public static void globalTearDown() { kdc.stop(); @@ -191,18 +222,26 @@ public void setUp() throws Exception { setRouter(rndRouter); } - protected void createDir(FileSystem fs, String dir) throws IOException { - fs.mkdirs(new Path(dir)); - String file = dir + "/file"; + protected void prepareEnv(FileSystem fs, Path path, Path renamedPath) + throws IOException { + // Set permission of parent to 777. 
+ fs.setPermission(path.getParent(), + FsPermission.createImmutable((short)511)); + fs.setPermission(renamedPath.getParent(), + FsPermission.createImmutable((short)511)); + // Create src path and file. + fs.mkdirs(path); + String file = path.toString() + "/file"; createFile(fs, file, 32); - verifyFileExists(fs, dir); + verifyFileExists(fs, path.toString()); verifyFileExists(fs, file); } protected void testRenameDir(RouterContext testRouter, String path, String renamedPath, boolean exceptionExpected, Callable call) throws IOException { - createDir(testRouter.getFileSystem(), path); + prepareEnv(testRouter.getFileSystem(), new Path(path), + new Path(renamedPath)); // rename boolean exceptionThrown = false; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java new file mode 100644 index 0000000000000..cb828db1b5587 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java @@ -0,0 +1,246 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.fs.permission.FsAction.ALL; +import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; +import static org.apache.hadoop.test.GenericTestUtils.getMethodName; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Test permission check of router federation rename. 
+ */ +public class TestRouterFederationRenamePermission + extends TestRouterFederationRenameBase { + + private String srcNs; // the source namespace. + private String dstNs; // the dst namespace. + // the source path. + private String srcStr; + private Path srcPath; + // the dst path. + private String dstStr; + private Path dstPath; + private UserGroupInformation foo; + private MiniRouterDFSCluster.RouterContext router; + private FileSystem routerFS; + private MiniRouterDFSCluster cluster; + + @BeforeClass + public static void before() throws Exception { + globalSetUp(); + } + + @AfterClass + public static void after() { + tearDown(); + } + + @Before + public void testSetup() throws Exception { + setup(); + cluster = getCluster(); + List nss = cluster.getNameservices(); + srcNs = nss.get(0); + dstNs = nss.get(1); + srcStr = cluster.getFederatedTestDirectoryForNS(srcNs) + "/d0/" + + getMethodName(); + dstStr = cluster.getFederatedTestDirectoryForNS(dstNs) + "/d0/" + + getMethodName(); + srcPath = new Path(srcStr); + dstPath = new Path(dstStr); + foo = UserGroupInformation.createRemoteUser("foo"); + router = getRouterContext(); + routerFS = getRouterFileSystem(); + } + + @Test + public void testRenameSnapshotPath() throws Exception { + LambdaTestUtils.intercept(IOException.class, + "Router federation rename can't rename snapshot path", + "Expect IOException.", () -> RouterFederationRename.checkSnapshotPath( + new RemoteLocation(srcNs, "/foo/.snapshot/src", "/src"), + new RemoteLocation(dstNs, "/foo/dst", "/dst"))); + LambdaTestUtils.intercept(IOException.class, + "Router federation rename can't rename snapshot path", + "Expect IOException.", () -> RouterFederationRename + .checkSnapshotPath(new RemoteLocation(srcNs, "/foo/src", "/src"), + new RemoteLocation(dstNs, "/foo/.snapshot/dst", "/dst"))); + } + + // Case1: the source path doesn't exist. + @Test + public void testPermission1() throws Exception { + LambdaTestUtils.intercept(RemoteException.class, "FileNotFoundException", + "Expect FileNotFoundException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case2: the source path parent without any permission. + @Test + public void testPermission2() throws Exception { + createDir(routerFS, srcStr); + routerFS.setPermission(srcPath.getParent(), + FsPermission.createImmutable((short) 0)); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case3: the source path with rwxr-xr-x permission. + @Test + public void testPermission3() throws Exception { + createDir(routerFS, srcStr); + routerFS.setPermission(srcPath.getParent(), + FsPermission.createImmutable((short) 493)); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case4: the source path with unrelated acl user:not-foo:rwx. 
+ @Test + public void testPermission4() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("not-foo", ALL)); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case5: the source path with user:foo:rwx. And the dst path doesn't exist. + @Test + public void testPermission5() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); + assertFalse(routerFS.exists(dstPath.getParent())); + LambdaTestUtils.intercept(RemoteException.class, "FileNotFoundException", + "Expect FileNotFoundException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case6: the src path with correct permission and the dst path with bad + // permission. + @Test + public void testPermission6() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); + assertTrue(routerFS.mkdirs(dstPath.getParent())); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case7: successful rename. + @Test + public void testPermission7() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); + assertTrue(routerFS.mkdirs(dstPath.getParent())); + routerFS.setOwner(dstPath.getParent(), "foo", "foogroup"); + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + assertFalse(verifyFileExists(routerFS, srcStr)); + assertTrue( + verifyFileExists(routerFS, dstStr + "/file")); + } + + /** + * Build acl list. + * + * user::rwx + * group::rwx + * user:input_user:input_permission + * other::r-x + * @param user the input user. + * @param permission the input fs action. + */ + private List buildAcl(String user, FsAction permission) { + List aclEntryList = Lists.newArrayList(); + aclEntryList.add( + new AclEntry.Builder() + .setName(user) + .setPermission(permission) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .build()); + aclEntryList.add( + new AclEntry.Builder() + .setPermission(FsAction.ALL) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .build()); + aclEntryList.add( + new AclEntry.Builder() + .setPermission(FsAction.ALL) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.GROUP) + .build()); + aclEntryList.add( + new AclEntry.Builder() + .setPermission(READ_EXECUTE) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .build()); + return aclEntryList; + } +} From 11b5598b753279381fedd6df5215ab35c30ef9ac Mon Sep 17 00:00:00 2001 From: Jinglun Date: Sun, 30 May 2021 19:27:10 +0800 Subject: [PATCH 368/666] Revert "HDFS-15973." because the user.name and user.email message is not correct. This reverts commit b7f69467c19f6114b931ca2377b0ed7a8ada609e. 
--- .../router/RouterFederationRename.java | 72 +---- .../src/site/markdown/HDFSRouterFederation.md | 5 +- .../router/TestRouterFederationRename.java | 183 ++++++++++--- .../TestRouterFederationRenameBase.java | 203 --------------- ...stRouterFederationRenameInKerberosEnv.java | 49 +--- .../TestRouterFederationRenamePermission.java | 246 ------------------ 6 files changed, 152 insertions(+), 606 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java index aafb685b88626..d2bf989f1f46e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java @@ -21,11 +21,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs; @@ -59,11 +55,7 @@ import org.slf4j.LoggerFactory; /** - * Rename across router federation namespaces based on federation balance. Both - * the src and the dst coming from different namespaces need to have only one - * destination. Snapshot paths are not allowed. - * Users need write privilege of both src parent and dst parent to do router - * federation rename. + * Rename across router based federation namespaces. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -107,8 +99,6 @@ boolean routerFedRename(final String src, final String dst, } RemoteLocation srcLoc = srcLocations.get(0); RemoteLocation dstLoc = dstLocations.get(0); - checkSnapshotPath(srcLoc, dstLoc); - checkPermission(srcLoc, dstLoc); UserGroupInformation routerUser = UserGroupInformation.getLoginUser(); @@ -141,66 +131,6 @@ boolean routerFedRename(final String src, final String dst, } } - /** - * Check router federation rename permission. - */ - private void checkPermission(RemoteLocation src, RemoteLocation dst) - throws IOException { - try { - if (UserGroupInformation.isSecurityEnabled()) { - // In security mode, check permission as remote user proxy by router - // user. - String remoteUserName = NameNode.getRemoteUser().getShortUserName(); - UserGroupInformation proxyUser = UserGroupInformation - .createProxyUser(remoteUserName, - UserGroupInformation.getLoginUser()); - proxyUser.doAs((PrivilegedExceptionAction) () -> { - checkRenamePermission(src, dst); - return null; - }); - } else { - // In simple mode, check permission as remote user directly. 
- checkRenamePermission(src, dst); - } - } catch (AccessControlException e) { - throw new AccessControlException( - "Permission denied rename " + src.getSrc() + "(" + src + ") to " + dst - .getSrc() + "(" + dst + ") Reason=" + e.getMessage()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new InterruptedIOException( - "Router Federation Rename is interrupted while checking permission."); - } - } - - private void checkRenamePermission(RemoteLocation srcLoc, - RemoteLocation dstLoc) throws IOException { - // check src path permission. - Path srcPath = - new Path("hdfs://" + srcLoc.getNameserviceId() + srcLoc.getDest()); - srcPath.getFileSystem(conf).access(srcPath.getParent(), FsAction.WRITE); - // check dst path permission. - Path dstPath = - new Path("hdfs://" + dstLoc.getNameserviceId() + dstLoc.getDest()); - dstPath.getFileSystem(conf).access(dstPath.getParent(), FsAction.WRITE); - } - - static void checkSnapshotPath(RemoteLocation src, RemoteLocation dst) - throws AccessControlException { - if (src.getDest() - .contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) { - throw new AccessControlException( - "Router federation rename can't rename snapshot path. src=" + src - .getSrc() + "(" + src + ")"); - } - if (dst.getDest() - .contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) { - throw new AccessControlException( - "Router federation rename can't rename snapshot path. dst=" + dst - .getSrc() + "(" + dst + ")"); - } - } - /** * Build router federation rename job moving data from src to dst. * @param srcNs the source namespace id. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index 151289fa7f7d8..d7838c75f3804 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -514,10 +514,7 @@ More metrics info can see [RBF Metrics](../../hadoop-project-dist/hadoop-common/ Router Federation Rename ------- -Enable Router to rename across namespaces. Currently it is implemented based on [HDFS Federation Balance](../../../hadoop-federation-balance/HDFSFederationBalance.md) and has some limits comparing with normal rename. -1. It is much slower than the normal rename so need a longer RPC timeout configuration. See `ipc.client.rpc-timeout.ms` and its description for more information about RPC timeout. -2. It doesn't support snapshot path. -3. It doesn't support to rename path with multiple destinations. +Enable Router to rename across namespaces. Currently the router federation rename is implemented by distcp. We must set the rpc timeout high enough so it won't timeout. 
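Both wordings of the doc paragraph above hinge on raising the client RPC timeout before using router federation rename; a minimal client-side sketch of doing that programmatically, using only the `ipc.client.rpc-timeout.ms` key the doc itself names (the 600000 ms value, the router address, and the mount paths are illustrative assumptions, not recommendations from the patch):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class RbfRenameClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Router federation rename runs a federation-balance (DistCp-based) job
    // under the rename RPC, so the default client timeout is usually too
    // short. 600000 ms (10 minutes) is an assumed value for illustration.
    conf.setInt("ipc.client.rpc-timeout.ms", 600000);

    // "rbf-router:8888" and the mount paths are hypothetical placeholders.
    FileSystem routerFs =
        FileSystem.get(URI.create("hdfs://rbf-router:8888"), conf);
    boolean renamed = routerFs.rename(
        new Path("/ns0-mount/data"), new Path("/ns1-mount/data"));
    System.out.println("rename returned " + renamed);
  }
}
```

The same key can equally be set in the client-side core-site.xml; the sketch only shows the programmatic form to make the dependency on the timeout explicit.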
| Property | Default | Description| |:---- |:---- |:---- | diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index 3f73d951bcf68..a9a17b3ef5025 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -19,32 +19,38 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; import static org.apache.hadoop.test.GenericTestUtils.getMethodName; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertEquals; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; +import static org.junit.Assert.*; import static org.mockito.Mockito.verify; import java.io.IOException; -import java.util.List; import java.util.Arrays; -import java.util.Set; +import java.util.List; +import java.util.Random; import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.MockResolver; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -54,53 +60,137 @@ /** * Basic tests of router federation rename. Rename across namespaces. 
*/ -public class TestRouterFederationRename extends TestRouterFederationRenameBase { +public class TestRouterFederationRename { - public static class MockGroupsMapping implements - GroupMappingServiceProvider { + private static final int NUM_SUBCLUSTERS = 2; + private static final int NUM_DNS = 6; - @Override - public List getGroups(String user) { - return Arrays.asList(user+"_group"); - } - - @Override - public void cacheGroupsRefresh() { - } - - @Override - public void cacheGroupsAdd(List groups) { - } - - @Override - public Set getGroupsSet(String user) { - return ImmutableSet.of(user+"_group"); - } - } + /** Federated HDFS cluster. */ + private static MiniRouterDFSCluster cluster; + /** Random Router for this federated cluster. */ private RouterContext router; + + /** Random nameservice in the federated cluster. */ + private String ns; + /** Filesystem interface to the Router. */ private FileSystem routerFS; - private MiniRouterDFSCluster cluster; + /** Filesystem interface to the Namenode. */ + private FileSystem nnFS; + /** File in the Namenode. */ + private String nnFile; @BeforeClass - public static void before() throws Exception { - globalSetUp(); + public static void globalSetUp() throws Exception { + Configuration namenodeConf = new Configuration(); + namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, + true); + cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); + cluster.setNumDatanodesPerNameservice(NUM_DNS); + cluster.addNamenodeOverrides(namenodeConf); + cluster.setIndependentDNs(); + + Configuration conf = new Configuration(); + conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5); + cluster.addNamenodeOverrides(conf); + // Start NNs and DNs and wait until ready. + cluster.startCluster(); + + // Start routers, enable router federation rename. + String journal = "hdfs://" + cluster.getCluster().getNameNode(1) + .getClientNamenodeAddress() + "/journal"; + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .routerRenameOption() + .set(SCHEDULER_JOURNAL_URI, journal) + .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") + .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") + .build(); + // We decrease the DN cache times to make the test faster. 
+ routerConf.setTimeDuration( + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + cluster.addRouterOverrides(routerConf); + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + + // We decrease the DN heartbeat expire interval to make them dead faster + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + DistCpProcedure.enableForTest(); } @AfterClass - public static void after() { - tearDown(); + public static void tearDown() { + cluster.shutdown(); + DistCpProcedure.disableForTest(); } @Before public void testSetup() throws Exception { - setup(); - router = getRouterContext(); - routerFS = getRouterFileSystem(); - cluster = getCluster(); + + // Create mock locations + cluster.installMockLocations(); + + // Delete all files via the NNs and verify + cluster.deleteAllFiles(); + + // Create test fixtures on NN + cluster.createTestDirectoriesNamenode(); + + // Wait to ensure NN has fully created its test directories + Thread.sleep(100); + + // Random router for this test + RouterContext rndRouter = cluster.getRandomRouter(); + this.setRouter(rndRouter); + + // Create a mount that points to 2 dirs in the same ns: + // /same + // ns0 -> / + // ns0 -> /target-ns0 + for (RouterContext rc : cluster.getRouters()) { + Router r = rc.getRouter(); + MockResolver resolver = (MockResolver) r.getSubclusterResolver(); + List nss = cluster.getNameservices(); + String ns0 = nss.get(0); + resolver.addLocation("/same", ns0, "/"); + resolver.addLocation("/same", ns0, cluster.getNamenodePathForNS(ns0)); + } + + // Pick a namenode for this test + String ns0 = cluster.getNameservices().get(0); + this.setNs(ns0); + this.setNamenode(cluster.getNamenode(ns0, null)); + + // Create a test file on the NN + Random rnd = new Random(); + String randomFile = "testfile-" + rnd.nextInt(); + this.nnFile = + cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; + + createFile(nnFS, nnFile, 32); + verifyFileExists(nnFS, nnFile); + } + + protected void createDir(FileSystem fs, String dir) throws IOException { + fs.mkdirs(new Path(dir)); + String file = dir + "/file"; + createFile(fs, file, 32); + verifyFileExists(fs, dir); + verifyFileExists(fs, file); } - private void testRenameDir(RouterContext testRouter, String path, + protected void testRenameDir(RouterContext testRouter, String path, String renamedPath, boolean exceptionExpected, Callable call) throws IOException { createDir(testRouter.getFileSystem(), path); @@ -129,6 +219,23 @@ private void testRenameDir(RouterContext testRouter, String path, } } + protected void setRouter(RouterContext r) throws IOException { + this.router = r; + this.routerFS = r.getFileSystem(); + } + + protected void setNs(String nameservice) { + this.ns = nameservice; + } + + protected void setNamenode(NamenodeContext nn) throws IOException { + this.nnFS = nn.getFileSystem(); + } + + protected FileSystem getRouterFileSystem() { + return this.routerFS; + } + @Test public void testSuccessfulRbfRename() throws Exception { List nss = cluster.getNameservices(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java deleted file mode 100644 index 40464923dda0e..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.federation.router; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY; -import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; -import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE; -import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; - -import java.io.IOException; -import java.util.List; -import java.util.Random; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; -import org.apache.hadoop.hdfs.server.federation.MockResolver; -import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; -import org.apache.hadoop.tools.fedbalance.DistCpProcedure; - -/** - * Test base of router federation rename. - */ -public class TestRouterFederationRenameBase { - - static final int NUM_SUBCLUSTERS = 2; - static final int NUM_DNS = 6; - - /** Random Router for this federated cluster. */ - private MiniRouterDFSCluster.RouterContext router; - - /** Random nameservice in the federated cluster. */ - private String ns; - /** Filesystem interface to the Router. */ - private FileSystem routerFS; - /** Filesystem interface to the Namenode. */ - private FileSystem nnFS; - /** File in the Namenode. */ - private String nnFile; - - /** Federated HDFS cluster. 
*/ - private static MiniRouterDFSCluster cluster; - - public static void globalSetUp() throws Exception { - Configuration namenodeConf = new Configuration(); - namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, - true); - namenodeConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - TestRouterFederationRename.MockGroupsMapping.class.getName()); - cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); - cluster.setNumDatanodesPerNameservice(NUM_DNS); - cluster.addNamenodeOverrides(namenodeConf); - cluster.setIndependentDNs(); - - Configuration conf = new Configuration(); - conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5); - cluster.addNamenodeOverrides(conf); - // Start NNs and DNs and wait until ready. - cluster.startCluster(); - - // Start routers, enable router federation rename. - String journal = "hdfs://" + cluster.getCluster().getNameNode(1) - .getClientNamenodeAddress() + "/journal"; - Configuration routerConf = new RouterConfigBuilder() - .metrics() - .rpc() - .routerRenameOption() - .set(SCHEDULER_JOURNAL_URI, journal) - .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") - .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") - .build(); - // We decrease the DN cache times to make the test faster. - routerConf.setTimeDuration( - RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); - routerConf.setBoolean(DFS_ROUTER_ADMIN_ENABLE, true); - routerConf.setBoolean(DFS_PERMISSIONS_ENABLED_KEY, true); - routerConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - TestRouterFederationRename.MockGroupsMapping.class.getName()); - cluster.addRouterOverrides(routerConf); - cluster.startRouters(); - - // Register and verify all NNs with all routers - cluster.registerNamenodes(); - cluster.waitNamenodeRegistration(); - - // We decrease the DN heartbeat expire interval to make them dead faster - cluster.getCluster().getNamesystem(0).getBlockManager() - .getDatanodeManager().setHeartbeatInterval(1); - cluster.getCluster().getNamesystem(1).getBlockManager() - .getDatanodeManager().setHeartbeatInterval(1); - cluster.getCluster().getNamesystem(0).getBlockManager() - .getDatanodeManager().setHeartbeatExpireInterval(3000); - cluster.getCluster().getNamesystem(1).getBlockManager() - .getDatanodeManager().setHeartbeatExpireInterval(3000); - DistCpProcedure.enableForTest(); - } - - public static void tearDown() { - cluster.shutdown(); - cluster = null; - DistCpProcedure.disableForTest(); - } - - protected void setup() throws IOException, InterruptedException { - - // Create mock locations - cluster.installMockLocations(); - - // Delete all files via the NNs and verify - cluster.deleteAllFiles(); - - // Create test fixtures on NN - cluster.createTestDirectoriesNamenode(); - - // Random router for this test - MiniRouterDFSCluster.RouterContext rndRouter = cluster.getRandomRouter(); - this.setRouter(rndRouter); - - // Create a mount that points to 2 dirs in the same ns: - // /same - // ns0 -> / - // ns0 -> /target-ns0 - for (MiniRouterDFSCluster.RouterContext rc : cluster.getRouters()) { - Router r = rc.getRouter(); - MockResolver resolver = (MockResolver) r.getSubclusterResolver(); - List nss = cluster.getNameservices(); - String ns0 = nss.get(0); - resolver.addLocation("/same", ns0, "/"); - resolver.addLocation("/same", ns0, cluster.getNamenodePathForNS(ns0)); - } - - // Pick a namenode for this test - String ns0 = cluster.getNameservices().get(0); - this.setNs(ns0); - this.setNamenode(cluster.getNamenode(ns0, null)); - - // Create a test file on the NN - Random 
rnd = new Random(); - String randomFile = "testfile-" + rnd.nextInt(); - this.nnFile = - cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; - - createFile(nnFS, nnFile, 32); - verifyFileExists(nnFS, nnFile); - } - - protected void setRouter(MiniRouterDFSCluster.RouterContext r) throws - IOException { - this.router = r; - this.routerFS = r.getFileSystem(); - } - - protected void setNs(String nameservice) { - this.ns = nameservice; - } - - protected void setNamenode(MiniRouterDFSCluster.NamenodeContext nn) - throws IOException { - this.nnFS = nn.getFileSystem(); - } - - protected FileSystem getRouterFileSystem() { - return this.routerFS; - } - - protected void createDir(FileSystem fs, String dir) throws IOException { - fs.mkdirs(new Path(dir)); - String file = dir + "/file"; - createFile(fs, file, 32); - verifyFileExists(fs, dir); - verifyFileExists(fs, file); - } - - public MiniRouterDFSCluster getCluster() { - return cluster; - } - - public MiniRouterDFSCluster.RouterContext getRouterContext() { - return router; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java index 28956928d4db6..369508f3b22f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hdfs.server.federation.router; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.ClientBaseWithFixes; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -33,8 +31,6 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ImpersonationProvider; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.junit.After; import org.junit.AfterClass; @@ -44,13 +40,11 @@ import java.io.File; import java.io.IOException; -import java.net.InetAddress; import java.security.PrivilegedExceptionAction; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS; import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; @@ -122,35 +116,10 @@ public static void globalSetUp() throws Exception { baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, DFS_DATA_TRANSFER_PROTECTION_DEFAULT); baseConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true); - baseConf.setClass(HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS, - AllowUserImpersonationProvider.class, 
ImpersonationProvider.class); DistCpProcedure.enableForTest(); } - /** - * {@link ImpersonationProvider} that confirms the user doing the - * impersonating is the same as the user running the MiniCluster. - */ - private static class AllowUserImpersonationProvider extends Configured - implements ImpersonationProvider { - public void init(String configurationPrefix) { - // Do nothing - } - - public void authorize(UserGroupInformation user, InetAddress remoteAddress) - throws AuthorizationException { - try { - if (!user.getRealUser().getShortUserName() - .equals(UserGroupInformation.getCurrentUser().getShortUserName())) { - throw new AuthorizationException(); - } - } catch (IOException ioe) { - throw new AuthorizationException(ioe); - } - } - } - @AfterClass public static void globalTearDown() { kdc.stop(); @@ -222,26 +191,18 @@ public void setUp() throws Exception { setRouter(rndRouter); } - protected void prepareEnv(FileSystem fs, Path path, Path renamedPath) - throws IOException { - // Set permission of parent to 777. - fs.setPermission(path.getParent(), - FsPermission.createImmutable((short)511)); - fs.setPermission(renamedPath.getParent(), - FsPermission.createImmutable((short)511)); - // Create src path and file. - fs.mkdirs(path); - String file = path.toString() + "/file"; + protected void createDir(FileSystem fs, String dir) throws IOException { + fs.mkdirs(new Path(dir)); + String file = dir + "/file"; createFile(fs, file, 32); - verifyFileExists(fs, path.toString()); + verifyFileExists(fs, dir); verifyFileExists(fs, file); } protected void testRenameDir(RouterContext testRouter, String path, String renamedPath, boolean exceptionExpected, Callable call) throws IOException { - prepareEnv(testRouter.getFileSystem(), new Path(path), - new Path(renamedPath)); + createDir(testRouter.getFileSystem(), path); // rename boolean exceptionThrown = false; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java deleted file mode 100644 index cb828db1b5587..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.federation.router; - -import static org.apache.hadoop.fs.permission.FsAction.ALL; -import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE; -import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; -import static org.apache.hadoop.test.GenericTestUtils.getMethodName; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.AclEntry; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.permission.AclEntryScope; -import org.apache.hadoop.fs.permission.AclEntryType; -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.protocol.ClientProtocol; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; -import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -/** - * Test permission check of router federation rename. - */ -public class TestRouterFederationRenamePermission - extends TestRouterFederationRenameBase { - - private String srcNs; // the source namespace. - private String dstNs; // the dst namespace. - // the source path. - private String srcStr; - private Path srcPath; - // the dst path. - private String dstStr; - private Path dstPath; - private UserGroupInformation foo; - private MiniRouterDFSCluster.RouterContext router; - private FileSystem routerFS; - private MiniRouterDFSCluster cluster; - - @BeforeClass - public static void before() throws Exception { - globalSetUp(); - } - - @AfterClass - public static void after() { - tearDown(); - } - - @Before - public void testSetup() throws Exception { - setup(); - cluster = getCluster(); - List nss = cluster.getNameservices(); - srcNs = nss.get(0); - dstNs = nss.get(1); - srcStr = cluster.getFederatedTestDirectoryForNS(srcNs) + "/d0/" - + getMethodName(); - dstStr = cluster.getFederatedTestDirectoryForNS(dstNs) + "/d0/" - + getMethodName(); - srcPath = new Path(srcStr); - dstPath = new Path(dstStr); - foo = UserGroupInformation.createRemoteUser("foo"); - router = getRouterContext(); - routerFS = getRouterFileSystem(); - } - - @Test - public void testRenameSnapshotPath() throws Exception { - LambdaTestUtils.intercept(IOException.class, - "Router federation rename can't rename snapshot path", - "Expect IOException.", () -> RouterFederationRename.checkSnapshotPath( - new RemoteLocation(srcNs, "/foo/.snapshot/src", "/src"), - new RemoteLocation(dstNs, "/foo/dst", "/dst"))); - LambdaTestUtils.intercept(IOException.class, - "Router federation rename can't rename snapshot path", - "Expect IOException.", () -> RouterFederationRename - .checkSnapshotPath(new RemoteLocation(srcNs, "/foo/src", "/src"), - new RemoteLocation(dstNs, "/foo/.snapshot/dst", "/dst"))); - } - - // Case1: the source path doesn't exist. 
- @Test - public void testPermission1() throws Exception { - LambdaTestUtils.intercept(RemoteException.class, "FileNotFoundException", - "Expect FileNotFoundException.", () -> { - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - }); - } - - // Case2: the source path parent without any permission. - @Test - public void testPermission2() throws Exception { - createDir(routerFS, srcStr); - routerFS.setPermission(srcPath.getParent(), - FsPermission.createImmutable((short) 0)); - LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", - "Expect AccessControlException.", () -> { - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - }); - } - - // Case3: the source path with rwxr-xr-x permission. - @Test - public void testPermission3() throws Exception { - createDir(routerFS, srcStr); - routerFS.setPermission(srcPath.getParent(), - FsPermission.createImmutable((short) 493)); - LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", - "Expect AccessControlException.", () -> { - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - }); - } - - // Case4: the source path with unrelated acl user:not-foo:rwx. - @Test - public void testPermission4() throws Exception { - createDir(routerFS, srcStr); - routerFS.setAcl(srcPath.getParent(), buildAcl("not-foo", ALL)); - LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", - "Expect AccessControlException.", () -> { - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - }); - } - - // Case5: the source path with user:foo:rwx. And the dst path doesn't exist. - @Test - public void testPermission5() throws Exception { - createDir(routerFS, srcStr); - routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); - assertFalse(routerFS.exists(dstPath.getParent())); - LambdaTestUtils.intercept(RemoteException.class, "FileNotFoundException", - "Expect FileNotFoundException.", () -> { - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - }); - } - - // Case6: the src path with correct permission and the dst path with bad - // permission. - @Test - public void testPermission6() throws Exception { - createDir(routerFS, srcStr); - routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); - assertTrue(routerFS.mkdirs(dstPath.getParent())); - LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", - "Expect AccessControlException.", () -> { - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - }); - } - - // Case7: successful rename. 
- @Test - public void testPermission7() throws Exception { - createDir(routerFS, srcStr); - routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); - assertTrue(routerFS.mkdirs(dstPath.getParent())); - routerFS.setOwner(dstPath.getParent(), "foo", "foogroup"); - DFSClient client = router.getClient(foo); - ClientProtocol clientProtocol = client.getNamenode(); - clientProtocol.rename(srcStr, dstStr); - assertFalse(verifyFileExists(routerFS, srcStr)); - assertTrue( - verifyFileExists(routerFS, dstStr + "/file")); - } - - /** - * Build acl list. - * - * user::rwx - * group::rwx - * user:input_user:input_permission - * other::r-x - * @param user the input user. - * @param permission the input fs action. - */ - private List buildAcl(String user, FsAction permission) { - List aclEntryList = Lists.newArrayList(); - aclEntryList.add( - new AclEntry.Builder() - .setName(user) - .setPermission(permission) - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.USER) - .build()); - aclEntryList.add( - new AclEntry.Builder() - .setPermission(FsAction.ALL) - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.USER) - .build()); - aclEntryList.add( - new AclEntry.Builder() - .setPermission(FsAction.ALL) - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.GROUP) - .build()); - aclEntryList.add( - new AclEntry.Builder() - .setPermission(READ_EXECUTE) - .setScope(AclEntryScope.ACCESS) - .setType(AclEntryType.OTHER) - .build()); - return aclEntryList; - } -} From 8d3fe5f053e792ef18183080238dc89229c0c937 Mon Sep 17 00:00:00 2001 From: Jinglun Date: Sun, 30 May 2021 19:28:40 +0800 Subject: [PATCH 369/666] HDFS-15973. RBF: Add permission check before doing router federation rename. Contributed by Jinglun. Reviewed-by: Inigo Goiri --- .../router/RouterFederationRename.java | 72 ++++- .../src/site/markdown/HDFSRouterFederation.md | 5 +- .../router/TestRouterFederationRename.java | 183 +++---------- .../TestRouterFederationRenameBase.java | 203 +++++++++++++++ ...stRouterFederationRenameInKerberosEnv.java | 49 +++- .../TestRouterFederationRenamePermission.java | 246 ++++++++++++++++++ 6 files changed, 606 insertions(+), 152 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java index d2bf989f1f46e..aafb685b88626 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFederationRename.java @@ -21,7 +21,11 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.security.AccessControlException; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs; @@ -55,7 +59,11 @@ import org.slf4j.LoggerFactory; /** - * Rename across router based federation namespaces. + * Rename across router federation namespaces based on federation balance. Both + * the src and the dst coming from different namespaces need to have only one + * destination. Snapshot paths are not allowed. + * Users need write privilege of both src parent and dst parent to do router + * federation rename. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -99,6 +107,8 @@ boolean routerFedRename(final String src, final String dst, } RemoteLocation srcLoc = srcLocations.get(0); RemoteLocation dstLoc = dstLocations.get(0); + checkSnapshotPath(srcLoc, dstLoc); + checkPermission(srcLoc, dstLoc); UserGroupInformation routerUser = UserGroupInformation.getLoginUser(); @@ -131,6 +141,66 @@ boolean routerFedRename(final String src, final String dst, } } + /** + * Check router federation rename permission. + */ + private void checkPermission(RemoteLocation src, RemoteLocation dst) + throws IOException { + try { + if (UserGroupInformation.isSecurityEnabled()) { + // In security mode, check permission as remote user proxy by router + // user. + String remoteUserName = NameNode.getRemoteUser().getShortUserName(); + UserGroupInformation proxyUser = UserGroupInformation + .createProxyUser(remoteUserName, + UserGroupInformation.getLoginUser()); + proxyUser.doAs((PrivilegedExceptionAction) () -> { + checkRenamePermission(src, dst); + return null; + }); + } else { + // In simple mode, check permission as remote user directly. + checkRenamePermission(src, dst); + } + } catch (AccessControlException e) { + throw new AccessControlException( + "Permission denied rename " + src.getSrc() + "(" + src + ") to " + dst + .getSrc() + "(" + dst + ") Reason=" + e.getMessage()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException( + "Router Federation Rename is interrupted while checking permission."); + } + } + + private void checkRenamePermission(RemoteLocation srcLoc, + RemoteLocation dstLoc) throws IOException { + // check src path permission. + Path srcPath = + new Path("hdfs://" + srcLoc.getNameserviceId() + srcLoc.getDest()); + srcPath.getFileSystem(conf).access(srcPath.getParent(), FsAction.WRITE); + // check dst path permission. + Path dstPath = + new Path("hdfs://" + dstLoc.getNameserviceId() + dstLoc.getDest()); + dstPath.getFileSystem(conf).access(dstPath.getParent(), FsAction.WRITE); + } + + static void checkSnapshotPath(RemoteLocation src, RemoteLocation dst) + throws AccessControlException { + if (src.getDest() + .contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) { + throw new AccessControlException( + "Router federation rename can't rename snapshot path. src=" + src + .getSrc() + "(" + src + ")"); + } + if (dst.getDest() + .contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) { + throw new AccessControlException( + "Router federation rename can't rename snapshot path. dst=" + dst + .getSrc() + "(" + dst + ")"); + } + } + /** * Build router federation rename job moving data from src to dst. * @param srcNs the source namespace id. 
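The checkRenamePermission and checkSnapshotPath hunks above amount to two preconditions a caller can probe before issuing a cross-namespace rename: WRITE access on both parent directories and no `.snapshot` component in either location. A small client-side sketch of the same two checks, under the assumption that the caller knows the fully qualified source and destination paths; it uses only the public `FileSystem.access`, `FsAction`, and `HdfsConstants` APIs that the patch itself relies on (class name, namespace URIs, and paths are hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.AccessControlException;

import java.io.IOException;

public final class RbfRenamePrecheckSketch {

  /** Mirrors the snapshot rule: neither side may contain "/.snapshot/". */
  static void precheckSnapshot(String src, String dst)
      throws AccessControlException {
    String marker = HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR;
    if (src.contains(marker) || dst.contains(marker)) {
      throw new AccessControlException(
          "snapshot paths cannot be renamed across namespaces");
    }
  }

  /** Mirrors checkRenamePermission: WRITE on both parents as the caller. */
  static void precheckWriteAccess(Configuration conf, Path src, Path dst)
      throws IOException {
    // access() throws AccessControlException when the permission is missing.
    src.getFileSystem(conf).access(src.getParent(), FsAction.WRITE);
    dst.getFileSystem(conf).access(dst.getParent(), FsAction.WRITE);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical fully qualified paths in two different nameservices.
    Path src = new Path("hdfs://ns0/user/foo/src");
    Path dst = new Path("hdfs://ns1/user/foo/dst");
    precheckSnapshot(src.toUri().getPath(), dst.toUri().getPath());
    precheckWriteAccess(conf, src, dst);
    System.out.println("parents writable, no snapshot path involved");
  }
}
```

This is only a pre-check: the router repeats the same validation server side (as a proxy user when security is enabled), so a race can still surface as an AccessControlException from the rename call itself.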
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md index d7838c75f3804..151289fa7f7d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md @@ -514,7 +514,10 @@ More metrics info can see [RBF Metrics](../../hadoop-project-dist/hadoop-common/ Router Federation Rename ------- -Enable Router to rename across namespaces. Currently the router federation rename is implemented by distcp. We must set the rpc timeout high enough so it won't timeout. +Enable Router to rename across namespaces. Currently it is implemented based on [HDFS Federation Balance](../../../hadoop-federation-balance/HDFSFederationBalance.md) and has some limits comparing with normal rename. +1. It is much slower than the normal rename so need a longer RPC timeout configuration. See `ipc.client.rpc-timeout.ms` and its description for more information about RPC timeout. +2. It doesn't support snapshot path. +3. It doesn't support to rename path with multiple destinations. | Property | Default | Description| |:---- |:---- |:---- | diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index a9a17b3ef5025..3f73d951bcf68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -19,38 +19,32 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; -import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; import static org.apache.hadoop.test.GenericTestUtils.getMethodName; -import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.verify; import java.io.IOException; -import java.util.Arrays; import java.util.List; -import java.util.Random; +import java.util.Arrays; +import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; -import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext; import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext; -import 
org.apache.hadoop.hdfs.server.federation.MockResolver; -import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.tools.fedbalance.DistCpProcedure; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -60,137 +54,53 @@ /** * Basic tests of router federation rename. Rename across namespaces. */ -public class TestRouterFederationRename { - - private static final int NUM_SUBCLUSTERS = 2; - private static final int NUM_DNS = 6; +public class TestRouterFederationRename extends TestRouterFederationRenameBase { - /** Federated HDFS cluster. */ - private static MiniRouterDFSCluster cluster; - - /** Random Router for this federated cluster. */ - private RouterContext router; + public static class MockGroupsMapping implements + GroupMappingServiceProvider { - /** Random nameservice in the federated cluster. */ - private String ns; - /** Filesystem interface to the Router. */ - private FileSystem routerFS; - /** Filesystem interface to the Namenode. */ - private FileSystem nnFS; - /** File in the Namenode. */ - private String nnFile; + @Override + public List getGroups(String user) { + return Arrays.asList(user+"_group"); + } - @BeforeClass - public static void globalSetUp() throws Exception { - Configuration namenodeConf = new Configuration(); - namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, - true); - cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); - cluster.setNumDatanodesPerNameservice(NUM_DNS); - cluster.addNamenodeOverrides(namenodeConf); - cluster.setIndependentDNs(); + @Override + public void cacheGroupsRefresh() { + } - Configuration conf = new Configuration(); - conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5); - cluster.addNamenodeOverrides(conf); - // Start NNs and DNs and wait until ready. - cluster.startCluster(); + @Override + public void cacheGroupsAdd(List groups) { + } - // Start routers, enable router federation rename. - String journal = "hdfs://" + cluster.getCluster().getNameNode(1) - .getClientNamenodeAddress() + "/journal"; - Configuration routerConf = new RouterConfigBuilder() - .metrics() - .rpc() - .routerRenameOption() - .set(SCHEDULER_JOURNAL_URI, journal) - .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") - .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") - .build(); - // We decrease the DN cache times to make the test faster. 
- routerConf.setTimeDuration( - RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); - cluster.addRouterOverrides(routerConf); - cluster.startRouters(); + @Override + public Set getGroupsSet(String user) { + return ImmutableSet.of(user+"_group"); + } + } - // Register and verify all NNs with all routers - cluster.registerNamenodes(); - cluster.waitNamenodeRegistration(); + private RouterContext router; + private FileSystem routerFS; + private MiniRouterDFSCluster cluster; - // We decrease the DN heartbeat expire interval to make them dead faster - cluster.getCluster().getNamesystem(0).getBlockManager() - .getDatanodeManager().setHeartbeatInterval(1); - cluster.getCluster().getNamesystem(1).getBlockManager() - .getDatanodeManager().setHeartbeatInterval(1); - cluster.getCluster().getNamesystem(0).getBlockManager() - .getDatanodeManager().setHeartbeatExpireInterval(3000); - cluster.getCluster().getNamesystem(1).getBlockManager() - .getDatanodeManager().setHeartbeatExpireInterval(3000); - DistCpProcedure.enableForTest(); + @BeforeClass + public static void before() throws Exception { + globalSetUp(); } @AfterClass - public static void tearDown() { - cluster.shutdown(); - DistCpProcedure.disableForTest(); + public static void after() { + tearDown(); } @Before public void testSetup() throws Exception { - - // Create mock locations - cluster.installMockLocations(); - - // Delete all files via the NNs and verify - cluster.deleteAllFiles(); - - // Create test fixtures on NN - cluster.createTestDirectoriesNamenode(); - - // Wait to ensure NN has fully created its test directories - Thread.sleep(100); - - // Random router for this test - RouterContext rndRouter = cluster.getRandomRouter(); - this.setRouter(rndRouter); - - // Create a mount that points to 2 dirs in the same ns: - // /same - // ns0 -> / - // ns0 -> /target-ns0 - for (RouterContext rc : cluster.getRouters()) { - Router r = rc.getRouter(); - MockResolver resolver = (MockResolver) r.getSubclusterResolver(); - List nss = cluster.getNameservices(); - String ns0 = nss.get(0); - resolver.addLocation("/same", ns0, "/"); - resolver.addLocation("/same", ns0, cluster.getNamenodePathForNS(ns0)); - } - - // Pick a namenode for this test - String ns0 = cluster.getNameservices().get(0); - this.setNs(ns0); - this.setNamenode(cluster.getNamenode(ns0, null)); - - // Create a test file on the NN - Random rnd = new Random(); - String randomFile = "testfile-" + rnd.nextInt(); - this.nnFile = - cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; - - createFile(nnFS, nnFile, 32); - verifyFileExists(nnFS, nnFile); - } - - protected void createDir(FileSystem fs, String dir) throws IOException { - fs.mkdirs(new Path(dir)); - String file = dir + "/file"; - createFile(fs, file, 32); - verifyFileExists(fs, dir); - verifyFileExists(fs, file); + setup(); + router = getRouterContext(); + routerFS = getRouterFileSystem(); + cluster = getCluster(); } - protected void testRenameDir(RouterContext testRouter, String path, + private void testRenameDir(RouterContext testRouter, String path, String renamedPath, boolean exceptionExpected, Callable call) throws IOException { createDir(testRouter.getFileSystem(), path); @@ -219,23 +129,6 @@ protected void testRenameDir(RouterContext testRouter, String path, } } - protected void setRouter(RouterContext r) throws IOException { - this.router = r; - this.routerFS = r.getFileSystem(); - } - - protected void setNs(String nameservice) { - this.ns = nameservice; - } - - protected void setNamenode(NamenodeContext nn) 
throws IOException { - this.nnFS = nn.getFileSystem(); - } - - protected FileSystem getRouterFileSystem() { - return this.routerFS; - } - @Test public void testSuccessfulRbfRename() throws Exception { List nss = cluster.getNameservices(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java new file mode 100644 index 0000000000000..40464923dda0e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameBase.java @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FEDERATION_RENAME_MAP; +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE; +import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI; + +import java.io.IOException; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.MockResolver; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.tools.fedbalance.DistCpProcedure; + +/** + * Test base of router federation rename. + */ +public class TestRouterFederationRenameBase { + + static final int NUM_SUBCLUSTERS = 2; + static final int NUM_DNS = 6; + + /** Random Router for this federated cluster. */ + private MiniRouterDFSCluster.RouterContext router; + + /** Random nameservice in the federated cluster. */ + private String ns; + /** Filesystem interface to the Router. */ + private FileSystem routerFS; + /** Filesystem interface to the Namenode. */ + private FileSystem nnFS; + /** File in the Namenode. */ + private String nnFile; + + /** Federated HDFS cluster. 
*/ + private static MiniRouterDFSCluster cluster; + + public static void globalSetUp() throws Exception { + Configuration namenodeConf = new Configuration(); + namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY, + true); + namenodeConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, + TestRouterFederationRename.MockGroupsMapping.class.getName()); + cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); + cluster.setNumDatanodesPerNameservice(NUM_DNS); + cluster.addNamenodeOverrides(namenodeConf); + cluster.setIndependentDNs(); + + Configuration conf = new Configuration(); + conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5); + cluster.addNamenodeOverrides(conf); + // Start NNs and DNs and wait until ready. + cluster.startCluster(); + + // Start routers, enable router federation rename. + String journal = "hdfs://" + cluster.getCluster().getNameNode(1) + .getClientNamenodeAddress() + "/journal"; + Configuration routerConf = new RouterConfigBuilder() + .metrics() + .rpc() + .routerRenameOption() + .set(SCHEDULER_JOURNAL_URI, journal) + .set(DFS_ROUTER_FEDERATION_RENAME_MAP, "1") + .set(DFS_ROUTER_FEDERATION_RENAME_BANDWIDTH, "1") + .build(); + // We decrease the DN cache times to make the test faster. + routerConf.setTimeDuration( + RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS); + routerConf.setBoolean(DFS_ROUTER_ADMIN_ENABLE, true); + routerConf.setBoolean(DFS_PERMISSIONS_ENABLED_KEY, true); + routerConf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, + TestRouterFederationRename.MockGroupsMapping.class.getName()); + cluster.addRouterOverrides(routerConf); + cluster.startRouters(); + + // Register and verify all NNs with all routers + cluster.registerNamenodes(); + cluster.waitNamenodeRegistration(); + + // We decrease the DN heartbeat expire interval to make them dead faster + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatInterval(1); + cluster.getCluster().getNamesystem(0).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + cluster.getCluster().getNamesystem(1).getBlockManager() + .getDatanodeManager().setHeartbeatExpireInterval(3000); + DistCpProcedure.enableForTest(); + } + + public static void tearDown() { + cluster.shutdown(); + cluster = null; + DistCpProcedure.disableForTest(); + } + + protected void setup() throws IOException, InterruptedException { + + // Create mock locations + cluster.installMockLocations(); + + // Delete all files via the NNs and verify + cluster.deleteAllFiles(); + + // Create test fixtures on NN + cluster.createTestDirectoriesNamenode(); + + // Random router for this test + MiniRouterDFSCluster.RouterContext rndRouter = cluster.getRandomRouter(); + this.setRouter(rndRouter); + + // Create a mount that points to 2 dirs in the same ns: + // /same + // ns0 -> / + // ns0 -> /target-ns0 + for (MiniRouterDFSCluster.RouterContext rc : cluster.getRouters()) { + Router r = rc.getRouter(); + MockResolver resolver = (MockResolver) r.getSubclusterResolver(); + List nss = cluster.getNameservices(); + String ns0 = nss.get(0); + resolver.addLocation("/same", ns0, "/"); + resolver.addLocation("/same", ns0, cluster.getNamenodePathForNS(ns0)); + } + + // Pick a namenode for this test + String ns0 = cluster.getNameservices().get(0); + this.setNs(ns0); + this.setNamenode(cluster.getNamenode(ns0, null)); + + // Create a test file on the NN + Random 
rnd = new Random(); + String randomFile = "testfile-" + rnd.nextInt(); + this.nnFile = + cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile; + + createFile(nnFS, nnFile, 32); + verifyFileExists(nnFS, nnFile); + } + + protected void setRouter(MiniRouterDFSCluster.RouterContext r) throws + IOException { + this.router = r; + this.routerFS = r.getFileSystem(); + } + + protected void setNs(String nameservice) { + this.ns = nameservice; + } + + protected void setNamenode(MiniRouterDFSCluster.NamenodeContext nn) + throws IOException { + this.nnFS = nn.getFileSystem(); + } + + protected FileSystem getRouterFileSystem() { + return this.routerFS; + } + + protected void createDir(FileSystem fs, String dir) throws IOException { + fs.mkdirs(new Path(dir)); + String file = dir + "/file"; + createFile(fs, file, 32); + verifyFileExists(fs, dir); + verifyFileExists(fs, file); + } + + public MiniRouterDFSCluster getCluster() { + return cluster; + } + + public MiniRouterDFSCluster.RouterContext getRouterContext() { + return router; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java index 369508f3b22f7..28956928d4db6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java @@ -18,9 +18,11 @@ package org.apache.hadoop.hdfs.server.federation.router; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.ClientBaseWithFixes; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -31,6 +33,8 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ImpersonationProvider; import org.apache.hadoop.tools.fedbalance.DistCpProcedure; import org.junit.After; import org.junit.AfterClass; @@ -40,11 +44,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetAddress; import java.security.PrivilegedExceptionAction; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS; import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; @@ -116,10 +122,35 @@ public static void globalSetUp() throws Exception { baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, DFS_DATA_TRANSFER_PROTECTION_DEFAULT); baseConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true); + baseConf.setClass(HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS, + AllowUserImpersonationProvider.class, 
ImpersonationProvider.class); DistCpProcedure.enableForTest(); } + /** + * {@link ImpersonationProvider} that confirms the user doing the + * impersonating is the same as the user running the MiniCluster. + */ + private static class AllowUserImpersonationProvider extends Configured + implements ImpersonationProvider { + public void init(String configurationPrefix) { + // Do nothing + } + + public void authorize(UserGroupInformation user, InetAddress remoteAddress) + throws AuthorizationException { + try { + if (!user.getRealUser().getShortUserName() + .equals(UserGroupInformation.getCurrentUser().getShortUserName())) { + throw new AuthorizationException(); + } + } catch (IOException ioe) { + throw new AuthorizationException(ioe); + } + } + } + @AfterClass public static void globalTearDown() { kdc.stop(); @@ -191,18 +222,26 @@ public void setUp() throws Exception { setRouter(rndRouter); } - protected void createDir(FileSystem fs, String dir) throws IOException { - fs.mkdirs(new Path(dir)); - String file = dir + "/file"; + protected void prepareEnv(FileSystem fs, Path path, Path renamedPath) + throws IOException { + // Set permission of parent to 777. + fs.setPermission(path.getParent(), + FsPermission.createImmutable((short)511)); + fs.setPermission(renamedPath.getParent(), + FsPermission.createImmutable((short)511)); + // Create src path and file. + fs.mkdirs(path); + String file = path.toString() + "/file"; createFile(fs, file, 32); - verifyFileExists(fs, dir); + verifyFileExists(fs, path.toString()); verifyFileExists(fs, file); } protected void testRenameDir(RouterContext testRouter, String path, String renamedPath, boolean exceptionExpected, Callable call) throws IOException { - createDir(testRouter.getFileSystem(), path); + prepareEnv(testRouter.getFileSystem(), new Path(path), + new Path(renamedPath)); // rename boolean exceptionThrown = false; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java new file mode 100644 index 0000000000000..cb828db1b5587 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java @@ -0,0 +1,246 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.fs.permission.FsAction.ALL; +import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists; +import static org.apache.hadoop.test.GenericTestUtils.getMethodName; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Test permission check of router federation rename. + */ +public class TestRouterFederationRenamePermission + extends TestRouterFederationRenameBase { + + private String srcNs; // the source namespace. + private String dstNs; // the dst namespace. + // the source path. + private String srcStr; + private Path srcPath; + // the dst path. + private String dstStr; + private Path dstPath; + private UserGroupInformation foo; + private MiniRouterDFSCluster.RouterContext router; + private FileSystem routerFS; + private MiniRouterDFSCluster cluster; + + @BeforeClass + public static void before() throws Exception { + globalSetUp(); + } + + @AfterClass + public static void after() { + tearDown(); + } + + @Before + public void testSetup() throws Exception { + setup(); + cluster = getCluster(); + List nss = cluster.getNameservices(); + srcNs = nss.get(0); + dstNs = nss.get(1); + srcStr = cluster.getFederatedTestDirectoryForNS(srcNs) + "/d0/" + + getMethodName(); + dstStr = cluster.getFederatedTestDirectoryForNS(dstNs) + "/d0/" + + getMethodName(); + srcPath = new Path(srcStr); + dstPath = new Path(dstStr); + foo = UserGroupInformation.createRemoteUser("foo"); + router = getRouterContext(); + routerFS = getRouterFileSystem(); + } + + @Test + public void testRenameSnapshotPath() throws Exception { + LambdaTestUtils.intercept(IOException.class, + "Router federation rename can't rename snapshot path", + "Expect IOException.", () -> RouterFederationRename.checkSnapshotPath( + new RemoteLocation(srcNs, "/foo/.snapshot/src", "/src"), + new RemoteLocation(dstNs, "/foo/dst", "/dst"))); + LambdaTestUtils.intercept(IOException.class, + "Router federation rename can't rename snapshot path", + "Expect IOException.", () -> RouterFederationRename + .checkSnapshotPath(new RemoteLocation(srcNs, "/foo/src", "/src"), + new RemoteLocation(dstNs, "/foo/.snapshot/dst", "/dst"))); + } + + // Case1: the source path doesn't exist. 
+ @Test + public void testPermission1() throws Exception { + LambdaTestUtils.intercept(RemoteException.class, "FileNotFoundException", + "Expect FileNotFoundException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case2: the source path parent without any permission. + @Test + public void testPermission2() throws Exception { + createDir(routerFS, srcStr); + routerFS.setPermission(srcPath.getParent(), + FsPermission.createImmutable((short) 0)); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case3: the source path with rwxr-xr-x permission. + @Test + public void testPermission3() throws Exception { + createDir(routerFS, srcStr); + routerFS.setPermission(srcPath.getParent(), + FsPermission.createImmutable((short) 493)); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case4: the source path with unrelated acl user:not-foo:rwx. + @Test + public void testPermission4() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("not-foo", ALL)); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case5: the source path with user:foo:rwx. And the dst path doesn't exist. + @Test + public void testPermission5() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); + assertFalse(routerFS.exists(dstPath.getParent())); + LambdaTestUtils.intercept(RemoteException.class, "FileNotFoundException", + "Expect FileNotFoundException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case6: the src path with correct permission and the dst path with bad + // permission. + @Test + public void testPermission6() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); + assertTrue(routerFS.mkdirs(dstPath.getParent())); + LambdaTestUtils.intercept(RemoteException.class, "AccessControlException", + "Expect AccessControlException.", () -> { + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + }); + } + + // Case7: successful rename. 
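Cases 4 to 6 above (and case 7 below) hinge on whether the source parent carries a named-user ACL entry for "foo". As a hedged aside that is not part of the patch, the same grant can also be expressed with FileSystem#modifyAclEntries, which merges a single entry instead of replacing the whole spec the way the test's buildAcl/setAcl combination does; routerFS and srcPath are the test fields used above:

import java.util.Collections;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

// Merge an access ACL entry equivalent to user:foo:rwx onto the source parent.
AclEntry fooAll = new AclEntry.Builder()
    .setType(AclEntryType.USER)
    .setScope(AclEntryScope.ACCESS)
    .setName("foo")
    .setPermission(FsAction.ALL)
    .build();
routerFS.modifyAclEntries(srcPath.getParent(), Collections.singletonList(fooAll));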
+ @Test + public void testPermission7() throws Exception { + createDir(routerFS, srcStr); + routerFS.setAcl(srcPath.getParent(), buildAcl("foo", ALL)); + assertTrue(routerFS.mkdirs(dstPath.getParent())); + routerFS.setOwner(dstPath.getParent(), "foo", "foogroup"); + DFSClient client = router.getClient(foo); + ClientProtocol clientProtocol = client.getNamenode(); + clientProtocol.rename(srcStr, dstStr); + assertFalse(verifyFileExists(routerFS, srcStr)); + assertTrue( + verifyFileExists(routerFS, dstStr + "/file")); + } + + /** + * Build acl list. + * + * user::rwx + * group::rwx + * user:input_user:input_permission + * other::r-x + * @param user the input user. + * @param permission the input fs action. + */ + private List buildAcl(String user, FsAction permission) { + List aclEntryList = Lists.newArrayList(); + aclEntryList.add( + new AclEntry.Builder() + .setName(user) + .setPermission(permission) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .build()); + aclEntryList.add( + new AclEntry.Builder() + .setPermission(FsAction.ALL) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER) + .build()); + aclEntryList.add( + new AclEntry.Builder() + .setPermission(FsAction.ALL) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.GROUP) + .build()); + aclEntryList.add( + new AclEntry.Builder() + .setPermission(READ_EXECUTE) + .setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER) + .build()); + return aclEntryList; + } +} From a2144e3d9e389348c04a4a06b6b77e92a95b63a9 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 31 May 2021 21:43:26 +0100 Subject: [PATCH 370/666] HADOOP-17735. Upgrade AWS SDK to 1.11.1026 (#3057) --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 3033b40b8d106..817a54db59a03 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -182,7 +182,7 @@ 1.3.1 1.0-beta-1 900 - 1.11.901 + 1.11.1026 2.3.4 1.11.2 2.1 From fa9515b4a5366639301134a9d754b4c611854536 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 1 Jun 2021 08:34:56 +0530 Subject: [PATCH 371/666] HDFS-16051. Misspelt words in DataXceiver.java line 881 and line 885. Contributed by Ning Sheng. 
--- .../org/apache/hadoop/hdfs/server/datanode/DataXceiver.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index c43fc3dae0ee1..082259cb8f92b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -878,11 +878,11 @@ public void writeBlock(final ExtendedBlock block, IOUtils.closeSocket(mirrorSock); mirrorSock = null; if (isClient) { - LOG.error("{}:Exception transfering block {} to mirror {}", + LOG.error("{}:Exception transferring block {} to mirror {}", datanode, block, mirrorNode, e); throw e; } else { - LOG.info("{}:Exception transfering {} to mirror {}- continuing " + + LOG.info("{}:Exception transferring {} to mirror {}- continuing " + "without the mirror", datanode, block, mirrorNode, e); incrDatanodeNetworkErrors(); } From 88e4fa0988401594d07ad75b11a036b34a77bd38 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 1 Jun 2021 08:38:13 +0530 Subject: [PATCH 372/666] Revert "HDFS-15982. Deleted data using HTTP API should be saved to the trash (#2927)" This reverts commit 041488e8f533a27e9497e86bb779c0b82c2e1d71. --- .../web/resources/DeleteSkipTrashParam.java | 50 --------------- .../fs/http/client/HttpFSFileSystem.java | 1 - .../hadoop/fs/http/server/FSOperations.java | 28 +-------- .../http/server/HttpFSParametersProvider.java | 22 +------ .../hadoop/fs/http/server/HttpFSServer.java | 9 +-- .../fs/http/server/TestHttpFSServer.java | 62 ------------------- .../src/main/webapps/router/explorer.html | 23 +------ .../src/main/webapps/router/explorer.js | 50 ++++----------- .../web/resources/NamenodeWebHdfsMethods.java | 61 +++++------------- .../src/main/webapps/hdfs/explorer.html | 23 +------ .../src/main/webapps/hdfs/explorer.js | 46 ++++---------- .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 6 +- .../apache/hadoop/hdfs/web/TestWebHDFS.java | 35 +---------- 13 files changed, 53 insertions(+), 363 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java deleted file mode 100644 index 5ca9d69d7c870..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.web.resources; - -/** - * SkipTrash param to be used by DELETE query. - */ -public class DeleteSkipTrashParam extends BooleanParam { - - public static final String NAME = "skiptrash"; - public static final String DEFAULT = FALSE; - - private static final Domain DOMAIN = new Domain(NAME); - - /** - * Constructor. - * @param value the parameter value. - */ - public DeleteSkipTrashParam(final Boolean value) { - super(DOMAIN, value); - } - - /** - * Constructor. - * @param str a string representation of the parameter value. - */ - public DeleteSkipTrashParam(final String str) { - this(DOMAIN.parse(str)); - } - - @Override - public String getName() { - return NAME; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 3cf7dad1dad31..2866044badcd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -122,7 +122,6 @@ public class HttpFSFileSystem extends FileSystem public static final String ACLSPEC_PARAM = "aclspec"; public static final String DESTINATION_PARAM = "destination"; public static final String RECURSIVE_PARAM = "recursive"; - public static final String SKIP_TRASH_PARAM = "skiptrash"; public static final String SOURCES_PARAM = "sources"; public static final String OWNER_PARAM = "owner"; public static final String GROUP_PARAM = "group"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java index a092db09eed08..e272cdc71b686 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -31,7 +31,6 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.http.client.HttpFSFileSystem; @@ -54,8 +53,6 @@ import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.apache.hadoop.fs.permission.FsCreateModes; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; @@ -78,8 +75,6 @@ @InterfaceAudience.Private public final class FSOperations { - private static final Logger LOG = LoggerFactory.getLogger(FSOperations.class); - private static int bufferSize = 4096; private FSOperations() { @@ -722,22 +717,18 @@ public static long copyBytes(InputStream in, OutputStream out, long count) */ @InterfaceAudience.Private public static class FSDelete implements FileSystemAccess.FileSystemExecutor { - private final Path path; - private final boolean recursive; - private final boolean skipTrash; + private Path path; + private boolean recursive; /** * Creates a Delete executor. * * @param path path to delete. * @param recursive if the delete should be recursive or not. 
- * @param skipTrash if the file must be deleted and not kept in trash - * regardless of fs.trash.interval config value. */ - public FSDelete(String path, boolean recursive, boolean skipTrash) { + public FSDelete(String path, boolean recursive) { this.path = new Path(path); this.recursive = recursive; - this.skipTrash = skipTrash; } /** @@ -752,19 +743,6 @@ public FSDelete(String path, boolean recursive, boolean skipTrash) { */ @Override public JSONObject execute(FileSystem fs) throws IOException { - if (!skipTrash) { - boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, - fs.getConf()); - if (movedToTrash) { - HttpFSServerWebApp.getMetrics().incrOpsDelete(); - return toJSON( - StringUtils.toLowerCase(HttpFSFileSystem.DELETE_JSON), true); - } - // Same is the behavior with Delete shell command. - // If moveToAppropriateTrash() returns false, file deletion - // is attempted rather than throwing Error. - LOG.debug("Could not move {} to Trash, attempting removal", path); - } boolean deleted = fs.delete(path, recursive); HttpFSServerWebApp.get().getMetrics().incrOpsDelete(); return toJSON( diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java index b810ec6690372..f6c84dcae4e07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java @@ -81,8 +81,7 @@ public class HttpFSParametersProvider extends ParametersProvider { new Class[]{ReplicationParam.class}); PARAMS_DEF.put(Operation.SETTIMES, new Class[]{ModifiedTimeParam.class, AccessTimeParam.class}); - PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class, - DeleteSkipTrashParam.class}); + PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class}); PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class}); PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{}); PARAMS_DEF.put(Operation.MODIFYACLENTRIES, @@ -243,25 +242,6 @@ public RecursiveParam() { } } - /** - * Class for delete's skipTrash parameter. - */ - @InterfaceAudience.Private - public static class DeleteSkipTrashParam extends BooleanParam { - - /** - * Parameter name. - */ - public static final String NAME = HttpFSFileSystem.SKIP_TRASH_PARAM; - - /** - * Constructor. - */ - public DeleteSkipTrashParam() { - super(NAME, false); - } - } - /** * Class for filter parameter. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index e3254d5bd9eff..d0d76d6289912 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -30,7 +30,6 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam; -import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DeleteSkipTrashParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ECPolicyParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam; @@ -549,13 +548,9 @@ public Response delete(@PathParam("path") String path, case DELETE: { Boolean recursive = params.get(RecursiveParam.NAME, RecursiveParam.class); - Boolean skipTrashParam = params.get(DeleteSkipTrashParam.NAME, - DeleteSkipTrashParam.class); - boolean skipTrash = skipTrashParam != null && skipTrashParam; - AUDIT_LOG.info("[{}] recursive [{}] skipTrash [{}]", path, recursive, - skipTrash); + AUDIT_LOG.info("[{}] recursive [{}]", path, recursive); FSOperations.FSDelete command = - new FSOperations.FSDelete(path, recursive, skipTrash); + new FSOperations.FSDelete(path, recursive); JSONObject json = fsExecute(user, command); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index cddf95e22129f..6aa8aa346ef9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs.http.server; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -538,36 +537,6 @@ private void createWithHttp(String filename, String perms, Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); } - private void deleteWithHttp(String filename, String perms, - String unmaskedPerms, Boolean skipTrash) throws Exception { - String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; - // Remove leading / from filename - if (filename.charAt(0) == '/') { - filename = filename.substring(1); - } - String pathOps; - if (perms == null) { - pathOps = MessageFormat.format("/webhdfs/v1/{0}?user.name={1}&op=DELETE", - filename, user); - } else { - pathOps = MessageFormat.format( - "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=DELETE", - filename, user, perms); - } - if (unmaskedPerms != null) { - pathOps = pathOps + "&unmaskedpermission=" + unmaskedPerms; - } - if (skipTrash != null) { - pathOps = pathOps + "&skiptrash=" + skipTrash; - } - URL url = new URL(TestJettyHelper.getJettyURL(), 
pathOps); - HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - conn.addRequestProperty("Content-Type", "application/octet-stream"); - conn.setRequestMethod("DELETE"); - conn.connect(); - Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); - } - /** * Talks to the http interface to create a directory. * @@ -813,37 +782,6 @@ public void testPerms() throws Exception { Assert.assertTrue("321".equals(getPerms(statusJson))); } - /** - * Validate create and delete calls. - */ - @Test - @TestDir - @TestJetty - @TestHdfs - public void testCreateDelete() throws Exception { - final String dir1 = "/testCreateDelete1"; - final String path1 = dir1 + "/file1"; - final String dir2 = "/testCreateDelete2"; - final String path2 = dir2 + "/file2"; - - createHttpFSServer(false, false); - final Configuration conf = HttpFSServerWebApp.get() - .get(FileSystemAccess.class).getFileSystemConfiguration(); - conf.setLong(FS_TRASH_INTERVAL_KEY, 5); - writeConf(conf, "hdfs-site.xml"); - - FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); - fs.mkdirs(new Path(dir1)); - - createWithHttp(path1, null); - deleteWithHttp(path1, null, null, null); - - fs.mkdirs(new Path(dir2)); - - createWithHttp(path2, null); - deleteWithHttp(path2, null, null, true); - } - /** * Validate XAttr get/set/remove calls. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html index 3150d87bdce62..80b38e7165f78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html @@ -165,32 +165,11 @@ -
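With the skiptrash handling removed from the HttpFS server side above (and from WebHDFS later in this revert), trash semantics are again a client-side concern. The sketch below, which is not part of the revert, shows the usual client pattern and mirrors the Trash.moveToAppropriateTrash fallback used by the deleted server code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

// Move the path to the caller's trash when trash is enabled; otherwise fall
// back to a plain recursive delete, matching the behaviour the server no
// longer provides.
static void deleteWithTrash(FileSystem fs, Path p, Configuration conf)
    throws IOException {
  if (!Trash.moveToAppropriateTrash(fs, p, conf)) {
    fs.delete(p, true);
  }
}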
    diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js index 0895eb967a6f2..cb16eac7b1221 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js @@ -82,47 +82,23 @@ function delete_path(inode_name, absolute_file_path) { $('#delete-modal-title').text("Delete - " + inode_name); $('#delete-prompt').text("Are you sure you want to delete " + inode_name - + " ?"); - $('#delete-trash-modal-title').text("Skip Trash - " + inode_name); - $('#delete-trash-prompt').text("Skipping Trash might delete file forever." - + " Do you want to skip-trash " + inode_name - + " ? (default behaviour - No)"); - - $('#skip-trash-button').click(function () { - // DELETE /webhdfs/v1/?op=DELETE&recursive=&skiptrash=true - var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true&skiptrash=true'; - $.ajax(url, - { - type: 'DELETE' - }).done(function (data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function () { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - $('#delete-trash-modal').modal('hide'); - $('#skip-trash-button').button('reset'); - }); - }) - $('#trash-button').click(function () { + + " ?"); + + $('#delete-button').click(function() { // DELETE /webhdfs/v1/?op=DELETE&recursive= var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true'; + '?op=DELETE' + '&recursive=true'; + $.ajax(url, - { - type: 'DELETE' - }).done(function (data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function () { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - $('#delete-trash-modal').modal('hide'); - $('#trash-button').button('reset'); - }); + { type: 'DELETE' + }).done(function(data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function() { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + }); }) - $('#delete-modal').modal(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 2703349db8704..e7bf32fcc05f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -55,10 +55,8 @@ import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.fs.Trash; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -121,9 +119,6 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.ResourceFilters; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; - /** Web-hdfs NameNode implementation. 
*/ @Path("") @ResourceFilters(ParamFilter.class) @@ -1508,13 +1503,10 @@ public Response deleteRoot( @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) - final SnapshotNameParam snapshotName, - @QueryParam(DeleteSkipTrashParam.NAME) - @DefaultValue(DeleteSkipTrashParam.DEFAULT) - final DeleteSkipTrashParam skiptrash + final SnapshotNameParam snapshotName ) throws IOException, InterruptedException { return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive, - snapshotName, skiptrash); + snapshotName); } /** Handle HTTP DELETE request. */ @@ -1535,53 +1527,34 @@ public Response delete( @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) - final SnapshotNameParam snapshotName, - @QueryParam(DeleteSkipTrashParam.NAME) - @DefaultValue(DeleteSkipTrashParam.DEFAULT) - final DeleteSkipTrashParam skiptrash + final SnapshotNameParam snapshotName ) throws IOException, InterruptedException { - init(ugi, delegation, username, doAsUser, path, op, recursive, - snapshotName, skiptrash); + init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName); - return doAs(ugi, () -> delete( - path.getAbsolutePath(), op, recursive, snapshotName, skiptrash)); + return doAs(ugi, new PrivilegedExceptionAction() { + @Override + public Response run() throws IOException { + return delete(ugi, delegation, username, doAsUser, + path.getAbsolutePath(), op, recursive, snapshotName); + } + }); } protected Response delete( + final UserGroupInformation ugi, + final DelegationParam delegation, + final UserParam username, + final DoAsParam doAsUser, final String fullpath, final DeleteOpParam op, final RecursiveParam recursive, - final SnapshotNameParam snapshotName, - final DeleteSkipTrashParam skipTrash) throws IOException { + final SnapshotNameParam snapshotName + ) throws IOException { final ClientProtocol cp = getRpcClientProtocol(); switch(op.getValue()) { case DELETE: { - Configuration conf = - (Configuration) context.getAttribute(JspHelper.CURRENT_CONF); - long trashInterval = - conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); - if (trashInterval > 0 && !skipTrash.getValue()) { - LOG.info("{} is {} , trying to archive {} instead of removing", - FS_TRASH_INTERVAL_KEY, trashInterval, fullpath); - org.apache.hadoop.fs.Path path = - new org.apache.hadoop.fs.Path(fullpath); - Configuration clonedConf = new Configuration(conf); - // To avoid caching FS objects and prevent OOM issues - clonedConf.set("fs.hdfs.impl.disable.cache", "true"); - FileSystem fs = FileSystem.get(clonedConf); - boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, - clonedConf); - if (movedToTrash) { - final String js = JsonUtil.toJsonString("boolean", true); - return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); - } - // Same is the behavior with Delete shell command. - // If moveToAppropriateTrash() returns false, file deletion - // is attempted rather than throwing Error. 
- LOG.debug("Could not move {} to Trash, attempting removal", fullpath); - } final boolean b = cp.delete(fullpath, recursive.getValue()); final String js = JsonUtil.toJsonString("boolean", b); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html index eb7ca3504891a..3f0509a229700 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html @@ -166,32 +166,11 @@
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index d9371bb9288e8..ea8b0accbde6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -83,46 +83,22 @@ $('#delete-modal-title').text("Delete - " + inode_name); $('#delete-prompt').text("Are you sure you want to delete " + inode_name + " ?"); - $('#delete-trash-modal-title').text("Skip Trash - " + inode_name); - $('#delete-trash-prompt').text("Skipping Trash might delete file forever." - + " Do you want to skip-trash " + inode_name - + " ? (default behaviour - No)"); - $('#skip-trash-button').click(function () { - // DELETE /webhdfs/v1/?op=DELETE&recursive=&skiptrash=true - var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true&skiptrash=true'; - $.ajax(url, - { - type: 'DELETE' - }).done(function (data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function () { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - $('#delete-trash-modal').modal('hide'); - $('#skip-trash-button').button('reset'); - }); - }) - $('#trash-button').click(function () { + $('#delete-button').click(function() { // DELETE /webhdfs/v1/?op=DELETE&recursive= var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true'; + '?op=DELETE' + '&recursive=true'; + $.ajax(url, - { - type: 'DELETE' - }).done(function (data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function () { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - $('#delete-trash-modal').modal('hide'); - $('#trash-button').button('reset'); - }); + { type: 'DELETE' + }).done(function(data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function() { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + }); }) - $('#delete-modal').modal(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index 07891ae64b3e5..3fb68d40e929b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -462,11 +462,7 @@ See also: [`destination`](#Destination), [FileSystem](../../api/org/apache/hadoo * Submit a HTTP DELETE request. curl -i -X DELETE "http://:/webhdfs/v1/?op=DELETE - [&recursive=][&skiptrash=]" - - Default values of queryparams if not provided: - 1. recursive: false - 2. 
skiptrash: false + [&recursive=]" The client receives a response with a [`boolean` JSON object](#Boolean_JSON_Schema): diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 698c323e9a970..68087c4c4ed6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.web; -import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; -import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; @@ -60,8 +58,6 @@ import java.util.NoSuchElementException; import java.util.Random; -import org.apache.hadoop.hdfs.web.resources.DeleteSkipTrashParam; -import org.apache.hadoop.hdfs.web.resources.RecursiveParam; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.QuotaUsage; @@ -1563,12 +1559,8 @@ private void checkResponseContainsLocation(URL url, String TYPE) HttpURLConnection.HTTP_OK, conn.getResponseCode()); JSONObject responseJson = new JSONObject(response); - if (!TYPE.equals("DELETE")) { - Assert.assertTrue("Response didn't give us a location. " + response, - responseJson.has("Location")); - } else { - Assert.assertTrue(responseJson.getBoolean("boolean")); - } + Assert.assertTrue("Response didn't give us a location. 
" + response, + responseJson.has("Location")); //Test that the DN allows CORS on Create if(TYPE.equals("CREATE")) { @@ -1580,15 +1572,14 @@ private void checkResponseContainsLocation(URL url, String TYPE) } } + @Test /** * Test that when "&noredirect=true" is added to operations CREATE, APPEND, * OPEN, and GETFILECHECKSUM the response (which is usually a 307 temporary * redirect) is a 200 with JSON that contains the redirected location */ - @Test public void testWebHdfsNoRedirect() throws Exception { final Configuration conf = WebHdfsTestUtil.createConf(); - conf.setLong(FS_TRASH_INTERVAL_KEY, 5); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); LOG.info("Started cluster"); InetSocketAddress addr = cluster.getNameNode().getHttpAddress(); @@ -1627,26 +1618,6 @@ public void testWebHdfsNoRedirect() throws Exception { + Param.toSortedString("&", new NoRedirectParam(true))); LOG.info("Sending append request " + url); checkResponseContainsLocation(url, "POST"); - - // setup some permission to allow moving file to .Trash location - cluster.getFileSystem().setPermission(new Path("/testWebHdfsNoRedirect"), - new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); - Path userDir = new Path(FileSystem.USER_HOME_PREFIX); - Path trashDir = new Path(FileSystem.USER_HOME_PREFIX, DEFAULT_HADOOP_HTTP_STATIC_USER); - Path trashPath = new Path(FileSystem.USER_HOME_PREFIX, - new Path(DEFAULT_HADOOP_HTTP_STATIC_USER, FileSystem.TRASH_PREFIX)); - cluster.getFileSystem().mkdirs(userDir, FsPermission.getDirDefault()); - cluster.getFileSystem().mkdir(trashDir, FsPermission.getDirDefault()); - cluster.getFileSystem().mkdir(trashPath, FsPermission.getDirDefault()); - cluster.getFileSystem().setOwner(trashPath, DEFAULT_HADOOP_HTTP_STATIC_USER, HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT); - cluster.getFileSystem().setPermission(new Path("/"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); - - url = new URL("http", addr.getHostString(), addr.getPort(), - WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect" + "?op=DELETE" - + Param.toSortedString("&", new RecursiveParam(true)) - + Param.toSortedString("&", new DeleteSkipTrashParam(false))); - LOG.info("Sending append request " + url); - checkResponseContainsLocation(url, "DELETE"); } @Test From 611c0110d29fd2b3c62121e83ff37613508f5a98 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Mon, 31 May 2021 20:34:48 -0700 Subject: [PATCH 373/666] HADOOP-17739. Use hadoop-thirdparty 1.1.1. (#3064) Reviewed-by: Akira Ajisaka --- hadoop-project/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 817a54db59a03..60ba45b37fd0d 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -90,7 +90,7 @@ 3.7.1 ${env.HADOOP_PROTOC_PATH} - 1.1.0 + 1.1.1 ${hadoop-thirdparty.version} ${hadoop-thirdparty.version} org.apache.hadoop.thirdparty From efa9a68236c70f0b66eb337535d92b1e81e5b056 Mon Sep 17 00:00:00 2001 From: huhaiyang Date: Tue, 1 Jun 2021 15:26:47 +0800 Subject: [PATCH 374/666] HDFS-15998. 
Fix NullPointException In listOpenFiles (#3036) Co-authored-by: huhaiyang --- .../hdfs/server/namenode/FSNamesystem.java | 8 ++- .../hdfs/server/namenode/LeaseManager.java | 9 ++- .../apache/hadoop/hdfs/TestDecommission.java | 67 +++++++++++++++++++ .../server/namenode/TestListOpenFiles.java | 31 +++++++++ 4 files changed, 112 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 095959b45bf67..6abe0c478a079 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2006,8 +2006,14 @@ public BatchedListEntries getFilesBlockingDecom(long prevId, continue; } Preconditions.checkState(ucFile instanceof INodeFile); - openFileIds.add(ucFileId); + INodeFile inodeFile = ucFile.asFile(); + if (!inodeFile.isUnderConstruction()) { + LOG.warn("The file {} is not under construction but has lease.", + inodeFile.getFullPathName()); + continue; + } + openFileIds.add(ucFileId); String fullPathName = inodeFile.getFullPathName(); if (org.apache.commons.lang3.StringUtils.isEmpty(path) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index f6f240dae1f3d..15c940aec618f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -300,8 +300,13 @@ public BatchedListEntries getUnderConstructionFiles( Iterator inodeIdIterator = inodeIds.iterator(); while (inodeIdIterator.hasNext()) { Long inodeId = inodeIdIterator.next(); - final INodeFile inodeFile = - fsnamesystem.getFSDirectory().getInode(inodeId).asFile(); + INode ucFile = fsnamesystem.getFSDirectory().getInode(inodeId); + if (ucFile == null) { + //probably got deleted + continue; + } + + final INodeFile inodeFile = ucFile.asFile(); if (!inodeFile.isUnderConstruction()) { LOG.warn("The file {} is not under construction but has lease.", inodeFile.getFullPathName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 18209a4d179e6..4f5ab17900db8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -37,6 +37,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; +import java.util.EnumSet; import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -55,6 +57,8 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.OpenFilesIterator; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; @@ -867,6 +871,69 @@ public void run() { closedFileSet, openFilesMap, 0); } + /** + * Verify Decommission In Progress with List Open Files + * 1. start decommissioning a node (set LeavingServiceStatus) + * 2. close file with decommissioning + * @throws Exception + */ + @Test(timeout=180000) + public void testDecommissionWithCloseFileAndListOpenFiles() + throws Exception { + LOG.info("Starting test testDecommissionWithCloseFileAndListOpenFiles"); + + // Disable redundancy monitor check so that open files blocking + // decommission can be listed and verified. + getConf().setInt( + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1000); + getConf().setLong( + DFSConfigKeys.DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES, 1); + + startSimpleCluster(1, 3); + FileSystem fileSys = getCluster().getFileSystem(0); + FSNamesystem ns = getCluster().getNamesystem(0); + Path file = new Path("/openFile"); + FSDataOutputStream st = AdminStatesBaseTest.writeIncompleteFile(fileSys, + file, (short)3, (short)(fileSize / blockSize)); + for (DataNode d: getCluster().getDataNodes()) { + DataNodeTestUtils.triggerBlockReport(d); + } + + LocatedBlocks lbs = NameNodeAdapter.getBlockLocations( + getCluster().getNameNode(0), file.toUri().getPath(), + 0, blockSize * 10); + DatanodeInfo dnToDecommission = lbs.getLastLocatedBlock().getLocations()[0]; + + DatanodeManager dm = ns.getBlockManager().getDatanodeManager(); + dnToDecommission = dm.getDatanode(dnToDecommission.getDatanodeUuid()); + initExcludeHost(dnToDecommission.getXferAddr()); + refreshNodes(0); + BlockManagerTestUtil.recheckDecommissionState(dm); + waitNodeState(dnToDecommission, AdminStates.DECOMMISSION_INPROGRESS); + Thread.sleep(3000); + //Make sure DatanodeAdminMonitor(DatanodeAdminBackoffMonitor) At least twice run. + + BatchedEntries batchedListEntries = getCluster(). 
+ getNameNodeRpc(0).listOpenFiles(0, + EnumSet.of(OpenFilesIterator.OpenFilesType.BLOCKING_DECOMMISSION), + OpenFilesIterator.FILTER_PATH_DEFAULT); + assertEquals(1, batchedListEntries.size()); + st.close(); //close file + + try { + batchedListEntries = getCluster().getNameNodeRpc().listOpenFiles(0, + EnumSet.of(OpenFilesIterator.OpenFilesType.BLOCKING_DECOMMISSION), + OpenFilesIterator.FILTER_PATH_DEFAULT); + assertEquals(0, batchedListEntries.size()); + } catch (NullPointerException e) { + Assert.fail("Should not throw NPE when the file is not under " + + "construction but has lease!"); + } + initExcludeHost(""); + refreshNodes(0); + fileSys.delete(file, false); + } + @Test(timeout = 360000) public void testDecommissionWithOpenFileAndBlockRecovery() throws IOException, InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index c6603cfee2ce0..c60a1369bd986 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -54,9 +54,11 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.ChunkedArrayList; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.Assert; /** * Verify open files listing. @@ -321,4 +323,33 @@ public void testListOpenFilesWithInvalidPathClientSide() throws Exception { "hdfs://non-cluster/")); fs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/path"); } + + @Test + public void testListOpenFilesWithDeletedPath() throws Exception { + HashMap openFiles = new HashMap<>(); + openFiles.putAll( + DFSTestUtil.createOpenFiles(fs, new Path("/"), "open-1", 1)); + BatchedEntries openFileEntryBatchedEntries = nnRpc + .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), + OpenFilesIterator.FILTER_PATH_DEFAULT); + assertEquals(1, openFileEntryBatchedEntries.size()); + String path = openFileEntryBatchedEntries.get(0).getFilePath(); + FSNamesystem fsNamesystem = cluster.getNamesystem(); + FSDirectory dir = fsNamesystem.getFSDirectory(); + List removedINodes = new ChunkedArrayList<>(); + removedINodes.add(dir.getINode(path)); + fsNamesystem.writeLock(); + try { + dir.removeFromInodeMap(removedINodes); + openFileEntryBatchedEntries = nnRpc + .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), + OpenFilesIterator.FILTER_PATH_DEFAULT); + assertEquals(0, openFileEntryBatchedEntries.size()); + fsNamesystem.leaseManager.removeLease(dir.getINode(path).getId()); + } catch (NullPointerException e) { + Assert.fail("Should not throw NPE when the file is deleted but has lease!"); + } finally { + fsNamesystem.writeUnlock(); + } + } } From 1301d931d45e2f3a91c336a4374ce293d5ab144b Mon Sep 17 00:00:00 2001 From: Gergely Pollak Date: Tue, 1 Jun 2021 15:57:22 +0200 Subject: [PATCH 375/666] YARN-10797. Logging parameter issues in scheduler package. 
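An aside on the listOpenFiles fix just shown: both hunks apply the same defensive pattern when walking lease holders, namely look the inode up by id and skip the entry if the file has already been deleted or is no longer under construction, instead of calling asFile() on a stale reference and hitting an NPE. A condensed sketch of that pattern follows; the surrounding variable names (leaseInodeIds, fsDirectory, openFileIds) are illustrative and not the actual NameNode fields.

    // Sketch only: skip stale lease entries rather than letting a deleted
    // inode surface as a NullPointerException.
    for (Long inodeId : leaseInodeIds) {
      INode inode = fsDirectory.getInode(inodeId);  // null if the file was deleted
      if (inode == null) {
        continue;                                   // lease not yet cleaned up, nothing to list
      }
      INodeFile file = inode.asFile();
      if (!file.isUnderConstruction()) {
        LOG.warn("The file {} is not under construction but has lease.",
            file.getFullPathName());
        continue;                                   // stale lease, skip it
      }
      openFileIds.add(inodeId);                     // only genuinely open files are reported
    }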
Contributed by Szilard Nemeth --- .../scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java | 2 +- .../resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java index 0ae0777e80155..21c81a671d638 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueConfigurationAutoRefreshPolicy.java @@ -140,7 +140,7 @@ public void editSchedule() { LOG.error("Can't refresh queue: " + e); if (!lastReloadAttemptFailed) { LOG.error("Failed to reload capacity scheduler config file - " + - "will use existing conf.", e.getMessage()); + "will use existing conf. Message: {}", e.getMessage()); } lastReloadAttempt = clock.getTime(); lastReloadAttemptFailed = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index 6507259f10cfd..dc1092913320c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -1200,7 +1200,7 @@ public boolean moveReservation(RMContainer reservedContainer, targetNode.reserveResource(this, reservedContainer.getReservedSchedulerKey(), reservedContainer); } catch (IllegalStateException e) { - LOG.debug("Reserve on target node failed, e={}", e); + LOG.debug("Reserve on target node failed", e); return false; } From 00c4b07fa4157aa310c1c7bb253cac3915558136 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Tue, 1 Jun 2021 15:30:27 +0200 Subject: [PATCH 376/666] YARN-10787. Queue submit ACL check is wrong when CS queue is ambiguous. 
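On the two logging one-liners in YARN-10797 above, both follow standard SLF4J conventions: every formatting argument needs a '{}' placeholder, and a Throwable passed as the last argument without a placeholder of its own is logged together with its stack trace. A small self-contained illustration (class name and messages are invented for the example):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingStyleDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingStyleDemo.class);

      public static void main(String[] args) {
        Exception e = new IllegalStateException("boom");

        // Fixed form: the '{}' placeholder actually consumes e.getMessage();
        // without it the extra argument is silently dropped.
        LOG.error("Failed to reload capacity scheduler config file - "
            + "will use existing conf. Message: {}", e.getMessage());

        // Fixed form: no placeholder, so SLF4J treats the trailing Throwable
        // specially and prints its stack trace.
        LOG.debug("Reserve on target node failed", e);

        // Original anti-pattern: the placeholder consumes the exception, so
        // typically only its toString() is logged and the stack trace is lost.
        LOG.debug("Reserve on target node failed, e={}", e);
      }
    }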
Contributed by Gergely Pollak --- .../server/resourcemanager/RMAppManager.java | 26 ++-- .../TestCapacitySchedulerAmbiguousLeafs.java | 123 ++++++++++++++++++ 2 files changed, 137 insertions(+), 12 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAmbiguousLeafs.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 25ff384c74278..d5297a2a73284 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -84,9 +84,9 @@ import org.apache.hadoop.yarn.util.StringHelper; /** - * This class manages the list of applications for the resource manager. + * This class manages the list of applications for the resource manager. */ -public class RMAppManager implements EventHandler, +public class RMAppManager implements EventHandler, Recoverable { private static final Logger LOG = @@ -143,7 +143,7 @@ static class ApplicationSummary { static final Logger LOG = LoggerFactory. getLogger(ApplicationSummary.class); - // Escape sequences + // Escape sequences static final char EQUALS = '='; static final char[] charsToEscape = {StringUtils.COMMA, EQUALS, StringUtils.ESCAPE_CHAR}; @@ -182,7 +182,7 @@ SummaryBuilder _add(String key, String value) { /** * create a summary of the application's runtime. - * + * * @param app {@link RMApp} whose summary is to be created, cannot * be null. */ @@ -247,7 +247,7 @@ public static SummaryBuilder createAppSummary(RMApp app) { /** * Log a summary of the application's runtime. 
- * + * * @param app {@link RMApp} whose summary is to be logged */ public static void logAppSummary(RMApp app) { @@ -274,7 +274,7 @@ private static V getChecked(Future future) throws YarnException { } protected synchronized int getCompletedAppsListSize() { - return this.completedApps.size(); + return this.completedApps.size(); } protected synchronized void finishApplication(ApplicationId applicationId) { @@ -285,7 +285,7 @@ protected synchronized void finishApplication(ApplicationId applicationId) { if (UserGroupInformation.isSecurityEnabled()) { rmContext.getDelegationTokenRenewer().applicationFinished(applicationId); } - + completedApps.add(applicationId); completedAppsInStateStore++; writeAuditLog(applicationId); @@ -297,26 +297,26 @@ protected void writeAuditLog(ApplicationId appId) { String operation = "UNKONWN"; boolean success = false; switch (app.getState()) { - case FAILED: + case FAILED: operation = AuditConstants.FINISH_FAILED_APP; break; case FINISHED: operation = AuditConstants.FINISH_SUCCESS_APP; success = true; break; - case KILLED: + case KILLED: operation = AuditConstants.FINISH_KILLED_APP; success = true; break; default: break; } - + if (success) { RMAuditLogger.logSuccess(app.getUser(), operation, "RMAppManager", app.getApplicationId()); } else { - StringBuilder diag = app.getDiagnostics(); + StringBuilder diag = app.getDiagnostics(); String msg = diag == null ? null : diag.toString(); RMAuditLogger.logFailure(app.getUser(), operation, msg, "RMAppManager", "App failed with state: " + app.getState(), appId); @@ -445,7 +445,9 @@ private RMAppImpl createAndPopulateNewRMApp( if (!isRecovery && YarnConfiguration.isAclEnabled(conf)) { if (scheduler instanceof CapacityScheduler) { - String queueName = submissionContext.getQueue(); + String queueName = placementContext == null ? + submissionContext.getQueue() : placementContext.getFullQueuePath(); + String appName = submissionContext.getApplicationName(); CSQueue csqueue = ((CapacityScheduler) scheduler).getQueue(queueName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAmbiguousLeafs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAmbiguousLeafs.java new file mode 100644 index 0000000000000..69824e3c3fa44 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAmbiguousLeafs.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.junit.Test; + +import java.io.IOException; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.ROOT; + +public class TestCapacitySchedulerAmbiguousLeafs { + /** + * Internal counter for incremental application id generation + */ + int appId = 0; + + /** + * Helper method to submit applications via RMClientService, to make sure + * all submissions go through RMAppManager. + * @param rm The resource manager instance + * @param queue Name of the queue to submit the application to + * @return ApplicationID of the submitted application + * @throws IOException + * @throws YarnException + */ + private ApplicationId submitApplication(MockRM rm, String queue) + throws IOException, YarnException { + //Generating incremental application id + final ApplicationAttemptId appAttemptId = TestUtils + .getMockApplicationAttemptId(appId++, 1); + + Resource resource = BuilderUtils.newResource(1024, 1); + ContainerLaunchContext amContainerSpec = ContainerLaunchContext + .newInstance(null, null, null, null, null, null); + ApplicationSubmissionContext asc = ApplicationSubmissionContext + .newInstance(appAttemptId.getApplicationId(), "Test application", + queue, null, amContainerSpec, false, true, 1, resource, + "applicationType"); + + SubmitApplicationRequest req = SubmitApplicationRequest.newInstance(asc); + rm.getClientRMService().submitApplication(req); + return appAttemptId.getApplicationId(); + } + + @Test + public void testAmbiguousSubmissionWithACL() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName()); + conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + + MockRM rm = new MockRM(conf); + CapacityScheduler cs = (CapacityScheduler)rm.getResourceScheduler(); + CapacitySchedulerConfiguration schedulerConf = cs.getConfiguration(); + + schedulerConf.setQueues(ROOT, new String[] {"a", "b", "default"}); + schedulerConf.setAcl(ROOT, QueueACL.SUBMIT_APPLICATIONS, " "); + schedulerConf.setAcl(ROOT, QueueACL.ADMINISTER_QUEUE, "forbidden forbidden"); + + schedulerConf.setQueues(ROOT + ".a", new String[] {"unique", "ambi"}); + schedulerConf.setAcl(ROOT + ".a", QueueACL.SUBMIT_APPLICATIONS, "forbidden forbidden"); + schedulerConf.setCapacity(ROOT + ".a", 45); + + schedulerConf.setQueues(ROOT + ".b", new String[] {"ambi"}); + schedulerConf.setCapacity(ROOT + ".b", 45); + schedulerConf.setCapacity(ROOT + ".default", 10); + + schedulerConf.setCapacity(ROOT + ".a.unique", 50); + schedulerConf.setAcl(ROOT + ".a.unique", QueueACL.SUBMIT_APPLICATIONS, "* *"); + schedulerConf.setCapacity(ROOT + ".a.ambi", 50); + schedulerConf.setAcl(ROOT + ".a.ambi", QueueACL.SUBMIT_APPLICATIONS, "* *"); + schedulerConf.setCapacity(ROOT + ".b.ambi", 100); + + schedulerConf.set(CapacitySchedulerConfiguration.MAPPING_RULE_FORMAT, "json"); + //Simple %specified 
mapping rule for all submissions with skip fallback + //The %specified needed rule to make sure we get an + //ApplicationPlacementContext which is required for validating YARN-10787 + schedulerConf.set(CapacitySchedulerConfiguration.MAPPING_RULE_JSON, + "{\"rules\" : [{\"type\": \"user\", \"policy\" : \"specified\", " + + "\"fallbackResult\" : \"skip\", \"matches\" : \"*\"}]}"); + schedulerConf.setOverrideWithQueueMappings(true); + + rm.start(); + cs.reinitialize(schedulerConf, rm.getRMContext()); + + + ApplicationId id = submitApplication(rm, "root.a.unique"); + rm.waitForState(id, RMAppState.ACCEPTED); + + id = submitApplication(rm, "unique"); + rm.waitForState(id, RMAppState.ACCEPTED); + + id = submitApplication(rm, "ambi"); + rm.waitForState(id, RMAppState.FAILED); + + id = submitApplication(rm, "root.a.ambi"); + rm.waitForState(id, RMAppState.ACCEPTED); + + rm.stop(); + } +} From 9e9aaa1f44169a6135f7866398bf7d4aaa9a40ae Mon Sep 17 00:00:00 2001 From: Kiran Kumar Maturi Date: Tue, 1 Jun 2021 22:24:45 +0530 Subject: [PATCH 377/666] changes to use environment variables for tracing exporter --- .../hadoop-common/src/main/conf/hadoop-env.sh | 6 ++++ .../org/apache/hadoop/tracing/Tracer.java | 12 ++++--- .../org/apache/hadoop/tracing/TestTracer.java | 33 +++++++++++++++++++ 3 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index f4625f5999b1c..db05942e5dc70 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -428,3 +428,9 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} # By default, Hadoop uses jsvc which needs to know to launch a # server jvm. # export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server" + +### +# Opentelemetry Trace Exporters Configuration +### +export OTEL_TRACES_EXPORTER=none +export OTEL_METRICS_EXPORTER=none \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java index b53f5c692d592..dadebbab2ae00 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/Tracer.java @@ -22,7 +22,6 @@ import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * No-Op Tracer (for now) to remove HTrace without changing too many files. 
*/ @@ -104,14 +103,19 @@ public Builder conf(TraceConfiguration conf) { } static OpenTelemetry initialiseTracer(String name) { - //TODO: Explore the possibility of moving these properties to config or environment variables - System.setProperty("otel.metrics.exporter", "none"); - System.setProperty("otel.traces.exporter", "none"); + //added to avoid test failures + setOTelEnvVariables("none", "none"); System.setProperty("otel.resource.attributes", String.format("service.name=%s", name)); OpenTelemetry openTelemetry = OpenTelemetrySdkAutoConfiguration.initialize(); return openTelemetry; } + //this method is added to set the environment variables for testing + private static void setOTelEnvVariables(String metricsExporter, String tracesExporter) { + System.setProperty("otel.metrics.exporter", metricsExporter); + System.setProperty("otel.traces.exporter", tracesExporter); + } + public synchronized Tracer build() { if (globalTracer == null) { globalTracer = new Tracer(name, initialiseTracer(name).getTracer(name)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java new file mode 100644 index 0000000000000..275f09637c340 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTracer.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.tracing; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class TestTracer { + @Test + public void testNoopTracer(){ + Tracer tracer = new Tracer.Builder("testTracer").build(); + assertNotNull(tracer.OTelTracer); + } + +} + + From 4d966d66dedbaf8579ad0b69bb9805e5c06a7cd2 Mon Sep 17 00:00:00 2001 From: sumangala-patki <70206833+sumangala-patki@users.noreply.github.com> Date: Thu, 3 Jun 2021 14:26:15 +0530 Subject: [PATCH 378/666] HADOOP-17596. ABFS: Change default Readahead Queue Depth from num(processors) to const (#2795) . Contributed by Sumangala Patki. 
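A note on the tracing-exporter change a couple of patches up: hadoop-env.sh now exports OTEL_TRACES_EXPORTER and OTEL_METRICS_EXPORTER, and Tracer.initialiseTracer() relies on the SDK autoconfiguration to pick that configuration up (the dotted system properties set for the tests take precedence over the environment variables). A rough standalone sketch of that flow, assuming the opentelemetry-sdk-extension-autoconfigure 1.0.0-alpha artifact wired in earlier in this series:

    import io.opentelemetry.api.OpenTelemetry;
    import io.opentelemetry.api.trace.Span;
    import io.opentelemetry.api.trace.Tracer;
    import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration;

    public class OtelAutoConfigDemo {
      public static void main(String[] args) {
        // Dotted system-property equivalents of the OTEL_* exports in hadoop-env.sh.
        System.setProperty("otel.traces.exporter", "none");
        System.setProperty("otel.metrics.exporter", "none");
        System.setProperty("otel.resource.attributes", "service.name=demo-service");

        OpenTelemetry otel = OpenTelemetrySdkAutoConfiguration.initialize();
        Tracer tracer = otel.getTracer("demo-service");

        Span span = tracer.spanBuilder("demo-operation").startSpan();
        try {
          span.addEvent("doing work");  // recorded, but never exported with exporter "none"
        } finally {
          span.end();
        }
      }
    }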
--- .../constants/FileSystemConfigurations.java | 2 +- .../hadoop-azure/src/site/markdown/abfs.md | 2 +- .../azurebfs/services/TestAbfsInputStream.java | 16 ++++++++++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index dc4caa98a5e60..24d7d1593f3d3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -88,7 +88,7 @@ public final class FileSystemConfigurations { public static final int MIN_LEASE_DURATION = 15; public static final int MAX_LEASE_DURATION = 60; - public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1; + public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = 2; public static final boolean DEFAULT_ENABLE_FLUSH = true; public static final boolean DEFAULT_DISABLE_OUTPUTSTREAM_FLUSH = true; diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md index 6be5952b03aa6..8724b97ab31d4 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md @@ -803,7 +803,7 @@ pattern is detected. `fs.azure.readaheadqueue.depth`: Sets the readahead queue depth in AbfsInputStream. In case the set value is negative the read ahead queue depth will be set as Runtime.getRuntime().availableProcessors(). By default the value -will be -1. To disable readaheads, set this value to 0. If your workload is +will be 2. To disable readaheads, set this value to 0. If your workload is doing only random reads (non-sequential) or you are seeing throttling, you may try setting this value to 0. diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index cbf3d6a2a68ee..3da004bafa4df 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; @@ -48,6 +49,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.FORWARD_SLASH; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_READ_AHEAD_QUEUE_DEPTH; /** * Unit test AbfsInputStream. 
@@ -569,6 +571,20 @@ public void testDiffReadRequestSizeAndRAHBlockSize() throws Exception { testReadAheads(inputStream, FORTY_EIGHT_KB, SIXTEEN_KB); } + @Test + public void testDefaultReadaheadQueueDepth() throws Exception { + Configuration config = getRawConfiguration(); + config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH); + AzureBlobFileSystem fs = getFileSystem(config); + Path testFile = new Path("/testFile"); + fs.create(testFile); + FSDataInputStream in = fs.open(testFile); + Assertions.assertThat( + ((AbfsInputStream) in.getWrappedStream()).getReadAheadQueueDepth()) + .describedAs("readahead queue depth should be set to default value 2") + .isEqualTo(2); + } + private void testReadAheads(AbfsInputStream inputStream, int readRequestSize, From 83e62e3d47221b07de74c9ec2150435f441db417 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 3 Jun 2021 15:26:00 +0530 Subject: [PATCH 379/666] HADOOP-17152. Provide Hadoop's own Lists utility to reduce dependency on Guava (#3061) Reviewed-by: Wei-Chiu Chuang Signed-off-by: Takanobu Asanuma --- .../java/org/apache/hadoop/util/Lists.java | 235 ++++++++++++++++++ .../org/apache/hadoop/util/TestLists.java | 102 ++++++++ 2 files changed, 337 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java new file mode 100644 index 0000000000000..b6d74ee679281 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util; + +import org.apache.hadoop.classification.InterfaceAudience; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +/** + * Static utility methods pertaining to {@link List} instances. + * This class is Hadoop's internal use alternative to Guava's Lists + * utility class. + * Javadocs for majority of APIs in this class are taken from Guava's Lists + * class from Guava release version 27.0-jre. + */ +@InterfaceAudience.Private +public final class Lists { + + private Lists() { + // empty + } + + /** + * Creates a mutable, empty {@code ArrayList} instance. + */ + public static ArrayList newArrayList() { + return new ArrayList<>(); + } + + /** + * Creates a mutable {@code ArrayList} instance containing the given + * elements. + * + *

    Note that even when you do need the ability to add or remove, + * this method provides only a tiny bit of syntactic sugar for + * {@code newArrayList(} + * {@link Arrays#asList asList} + * {@code (...))}, or for creating an empty list then calling + * {@link Collections#addAll}. + */ + @SafeVarargs + public static ArrayList newArrayList(E... elements) { + if (elements == null) { + throw new NullPointerException(); + } + // Avoid integer overflow when a large array is passed in + int capacity = computeArrayListCapacity(elements.length); + ArrayList list = new ArrayList<>(capacity); + Collections.addAll(list, elements); + return list; + } + + /** + * Creates a mutable {@code ArrayList} instance containing the + * given elements; a very thin shortcut for creating an empty list then + * calling Iterables#addAll. + */ + public static ArrayList newArrayList(Iterable elements) { + if (elements == null) { + throw new NullPointerException(); + } + return (elements instanceof Collection) + ? new ArrayList<>(cast(elements)) + : newArrayList(elements.iterator()); + } + + /** + * Creates a mutable {@code ArrayList} instance containing the + * given elements; a very thin shortcut for creating an empty list + * and then calling Iterators#addAll. + */ + public static ArrayList newArrayList(Iterator elements) { + ArrayList list = newArrayList(); + addAll(list, elements); + return list; + } + + /** + * Creates an {@code ArrayList} instance backed by an array with the + * specified initial size; + * simply delegates to {@link ArrayList#ArrayList(int)}. + * + * @param initialArraySize the exact size of the initial backing array for + * the returned array list + * ({@code ArrayList} documentation calls this value the "capacity"). + * @return a new, empty {@code ArrayList} which is guaranteed not to + * resize itself unless its size reaches {@code initialArraySize + 1}. + * @throws IllegalArgumentException if {@code initialArraySize} is negative. + */ + public static ArrayList newArrayListWithCapacity( + int initialArraySize) { + checkNonnegative(initialArraySize, "initialArraySize"); + return new ArrayList<>(initialArraySize); + } + + /** + * Creates an {@code ArrayList} instance to hold {@code estimatedSize} + * elements, plus an unspecified amount of padding; + * you almost certainly mean to call {@link + * #newArrayListWithCapacity} (see that method for further advice on usage). + * + * @param estimatedSize an estimate of the eventual {@link List#size()} + * of the new list. + * @return a new, empty {@code ArrayList}, sized appropriately to hold the + * estimated number of elements. + * @throws IllegalArgumentException if {@code estimatedSize} is negative. + */ + public static ArrayList newArrayListWithExpectedSize( + int estimatedSize) { + return new ArrayList<>(computeArrayListCapacity(estimatedSize)); + } + + /** + * Creates a mutable, empty {@code LinkedList} instance. + * + *

    Performance note: {@link ArrayList} and + * {@link java.util.ArrayDeque} consistently + * outperform {@code LinkedList} except in certain rare and specific + * situations. Unless you have + * spent a lot of time benchmarking your specific needs, use one of those + * instead. + */ + public static LinkedList newLinkedList() { + return new LinkedList<>(); + } + + /** + * Creates a mutable {@code LinkedList} instance containing the given + * elements; a very thin shortcut for creating an empty list then calling + * Iterables#addAll. + * + *

    Performance note: {@link ArrayList} and + * {@link java.util.ArrayDeque} consistently + * outperform {@code LinkedList} except in certain rare and specific + * situations. Unless you have spent a lot of time benchmarking your + * specific needs, use one of those instead. + */ + public static LinkedList newLinkedList( + Iterable elements) { + LinkedList list = newLinkedList(); + addAll(list, elements); + return list; + } + + private static int computeArrayListCapacity(int arraySize) { + checkNonnegative(arraySize, "arraySize"); + return saturatedCast(5L + arraySize + (arraySize / 10)); + } + + private static int checkNonnegative(int value, String name) { + if (value < 0) { + throw new IllegalArgumentException(name + " cannot be negative but was: " + + value); + } + return value; + } + + /** + * Returns the {@code int} nearest in value to {@code value}. + * + * @param value any {@code long} value. + * @return the same value cast to {@code int} if it is in the range of the + * {@code int} type, {@link Integer#MAX_VALUE} if it is too large, + * or {@link Integer#MIN_VALUE} if it is too small. + */ + private static int saturatedCast(long value) { + if (value > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } + if (value < Integer.MIN_VALUE) { + return Integer.MIN_VALUE; + } + return (int) value; + } + + private static boolean addAll(Collection addTo, + Iterator iterator) { + if (addTo == null) { + throw new NullPointerException(); + } + if (iterator == null) { + throw new NullPointerException(); + } + boolean wasModified = false; + while (iterator.hasNext()) { + wasModified |= addTo.add(iterator.next()); + } + return wasModified; + } + + private static Collection cast(Iterable iterable) { + return (Collection) iterable; + } + + /** + * Adds all elements in {@code iterable} to {@code collection}. + * + * @return {@code true} if {@code collection} was modified as a result of + * this operation. + */ + private static boolean addAll(Collection addTo, + Iterable elementsToAdd) { + if (elementsToAdd instanceof Collection) { + Collection c = cast(elementsToAdd); + return addTo.addAll(c); + } + if (elementsToAdd == null) { + throw new NullPointerException(); + } + return addAll(addTo, elementsToAdd.iterator()); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java new file mode 100644 index 0000000000000..537e3781edc0e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.util; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Simple tests for utility class Lists. + */ +public class TestLists { + + @Test + public void testAddToEmptyArrayList() { + List list = Lists.newArrayList(); + list.add("record1"); + Assert.assertEquals(1, list.size()); + Assert.assertEquals("record1", list.get(0)); + } + + @Test + public void testAddToEmptyLinkedList() { + List list = Lists.newLinkedList(); + list.add("record1"); + Assert.assertEquals(1, list.size()); + Assert.assertEquals("record1", list.get(0)); + } + + @Test + public void testVarArgArrayLists() { + List list = Lists.newArrayList("record1", "record2", "record3"); + list.add("record4"); + Assert.assertEquals(4, list.size()); + Assert.assertEquals("record1", list.get(0)); + Assert.assertEquals("record2", list.get(1)); + Assert.assertEquals("record3", list.get(2)); + Assert.assertEquals("record4", list.get(3)); + } + + @Test + public void testItrArrayLists() { + Set set = new HashSet<>(); + set.add("record1"); + set.add("record2"); + set.add("record3"); + List list = Lists.newArrayList(set); + list.add("record4"); + Assert.assertEquals(4, list.size()); + } + + @Test + public void testItrLinkedLists() { + Set set = new HashSet<>(); + set.add("record1"); + set.add("record2"); + set.add("record3"); + List list = Lists.newLinkedList(set); + list.add("record4"); + Assert.assertEquals(4, list.size()); + } + + @Test + public void testArrayListWithSize() { + List list = Lists.newArrayListWithCapacity(3); + list.add("record1"); + list.add("record2"); + list.add("record3"); + Assert.assertEquals(3, list.size()); + Assert.assertEquals("record1", list.get(0)); + Assert.assertEquals("record2", list.get(1)); + Assert.assertEquals("record3", list.get(2)); + list = Lists.newArrayListWithCapacity(3); + list.add("record1"); + list.add("record2"); + list.add("record3"); + Assert.assertEquals(3, list.size()); + Assert.assertEquals("record1", list.get(0)); + Assert.assertEquals("record2", list.get(1)); + Assert.assertEquals("record3", list.get(2)); + } + +} From a0d9d0d9acd4666a3f53ae6b41f070c11ad6c1dd Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Thu, 3 Jun 2021 13:27:36 +0200 Subject: [PATCH 380/666] YARN-10796. Capacity Scheduler: dynamic queue cannot scale out properly if its capacity is 0%. 
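The new org.apache.hadoop.util.Lists above deliberately mirrors the handful of Guava Lists factory methods Hadoop call sites actually use, so they can migrate off Guava without behavioural changes. A short usage sketch against only the methods added in this patch (the class and values below are just for demonstration):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.util.Lists;

    public class ListsDemo {
      public static void main(String[] args) {
        // Varargs factory, equivalent to Guava's Lists.newArrayList(E...).
        List<String> dirs = Lists.newArrayList("/data/1", "/data/2");

        // From an Iterable; non-Collection iterables fall back to an iterator copy.
        Set<String> set = new HashSet<>(Arrays.asList("a", "b", "c"));
        List<String> fromSet = Lists.newArrayList(set);

        // Exact initial capacity when the final size is known up front.
        List<Integer> sized = Lists.newArrayListWithCapacity(16);
        sized.add(42);

        // LinkedList variants exist as well, though the Javadoc itself notes
        // ArrayList usually performs better.
        List<String> linked = Lists.newLinkedList(fromSet);

        System.out.println(dirs.size() + " " + fromSet.size() + " "
            + sized.size() + " " + linked.size());
      }
    }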
Contributed by Peter Bacsko --- .../scheduler/capacity/UsersManager.java | 24 ++-- .../scheduler/capacity/TestUsersManager.java | 109 ++++++++++++++++++ 2 files changed, 126 insertions(+), 7 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUsersManager.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java index 6f7d8f6155a1d..cf9dead1837e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java @@ -702,7 +702,8 @@ private void computeNumActiveUsersWithOnlyPendingApps() { activeUsersWithOnlyPendingApps = new AtomicInteger(numPendingUsers); } - private Resource computeUserLimit(String userName, Resource clusterResource, + @VisibleForTesting + Resource computeUserLimit(String userName, Resource clusterResource, String nodePartition, SchedulingMode schedulingMode, boolean activeUser) { Resource partitionResource = labelManager.getResourceByLabel(nodePartition, clusterResource); @@ -716,6 +717,7 @@ private Resource computeUserLimit(String userName, Resource clusterResource, * (which extra resources we are allocating) */ Resource queueCapacity = lQueue.getEffectiveCapacity(nodePartition); + Resource originalCapacity = queueCapacity; /* * Assume we have required resource equals to minimumAllocation, this can @@ -791,16 +793,19 @@ partitionResource, getUsageRatio(nodePartition), // IGNORE_PARTITION_EXCLUSIVITY allocation. Resource maxUserLimit = Resources.none(); if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) { - // If user-limit-factor set to -1, we should disabled user limit. - if (getUserLimitFactor() != -1) { - maxUserLimit = Resources.multiplyAndRoundDown(queueCapacity, - getUserLimitFactor()); - } else { + if (getUserLimitFactor() == -1 || + originalCapacity.equals(Resources.none())) { + // If user-limit-factor set to -1, we should disable user limit. + // + // Also prevent incorrect maxUserLimit due to low queueCapacity + // Can happen if dynamic queue has capacity = 0% maxUserLimit = lQueue. 
getEffectiveMaxCapacityDown( nodePartition, lQueue.getMinimumAllocation()); + } else { + maxUserLimit = Resources.multiplyAndRoundDown(queueCapacity, + getUserLimitFactor()); } - } else if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) { maxUserLimit = partitionResource; } @@ -1131,4 +1136,9 @@ public void updateUserWeights() { public int getNumActiveUsersWithOnlyPendingApps() { return activeUsersWithOnlyPendingApps.get(); } + + @VisibleForTesting + void setUsageRatio(String label, float usage) { + qUsageRatios.usageRatios.put(label, usage); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUsersManager.java new file mode 100644 index 0000000000000..5b79ee2e255c1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUsersManager.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.mockito.Mockito.when; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TestUsersManager { + private static final Resource CLUSTER_RESOURCE = + Resource.newInstance(16384, 16); + private static final Resource MINIMUM_ALLOCATION = + Resource.newInstance(1024, 1); + private static final Resource MAX_RESOURCE_LIMIT = + Resource.newInstance(9216, 1); + private static final Resource NON_ZERO_CAPACITY = + Resource.newInstance(8192, 1); + private static final String TEST_USER = "test"; + + private UsersManager usersManager; + + @Mock + private AutoCreatedLeafQueue lQueue; + + @Mock + private RMNodeLabelsManager labelMgr; + + @Mock + private QueueMetrics metrics; + + @Mock + private CapacitySchedulerContext context; + + @Before + public void setup() { + usersManager = new UsersManager(metrics, + lQueue, + labelMgr, + context, + new DefaultResourceCalculator()); + + when(lQueue.getMinimumAllocation()).thenReturn(MINIMUM_ALLOCATION); + when(lQueue.getEffectiveMaxCapacityDown(anyString(), any(Resource.class))) + .thenReturn(MAX_RESOURCE_LIMIT); + when(labelMgr.getResourceByLabel(anyString(), any(Resource.class))) + .thenReturn(CLUSTER_RESOURCE); + usersManager.setUsageRatio(CommonNodeLabelsManager.NO_LABEL, 0.5f); + usersManager.setUserLimit( + CapacitySchedulerConfiguration.DEFAULT_USER_LIMIT); + usersManager.setUserLimitFactor( + CapacitySchedulerConfiguration.DEFAULT_USER_LIMIT_FACTOR); + } + + @Test + public void testComputeUserLimitWithZeroCapacityQueue() { + when(lQueue.getEffectiveCapacity(anyString())) + .thenReturn(Resources.none()); + + checkLimit(MAX_RESOURCE_LIMIT); + } + + @Test + public void testComputeUserLimitWithNonZeroCapacityQueue() { + when(lQueue.getEffectiveCapacity(anyString())) + .thenReturn(NON_ZERO_CAPACITY); + + checkLimit(NON_ZERO_CAPACITY); + } + + private void checkLimit(Resource expectedLimit) { + Resource limit = usersManager.computeUserLimit(TEST_USER, + CLUSTER_RESOURCE, + CommonNodeLabelsManager.NO_LABEL, + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, + true); + + assertEquals("User limit", expectedLimit, limit); + } +} From cbb9c9062e0226f40a3cbceee1b5d4fc22f2f470 Mon Sep 17 00:00:00 2001 From: July <51110188+yikf@users.noreply.github.com> Date: Sat, 5 Jun 2021 04:36:09 +0800 Subject: [PATCH 381/666] HDFS-16033 Fix issue of the StatisticsDataReferenceCleaner cleanUp (#3042) Contributed by kaifeiYi (yikf). 
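Back on the YARN-10796 change above: the user limit cap used to be queueCapacity multiplied by the user-limit-factor, which collapses to zero for a dynamic queue created with 0% capacity, so such a queue could never scale out. The patch therefore remembers the original effective capacity and, when it is zero (or when the factor is -1), falls back to the queue's effective max capacity. A condensed restatement of just that branch, with simplified parameters rather than the real scheduler types:

    // Sketch of the maxUserLimit selection after YARN-10796 (illustrative only).
    Resource computeMaxUserLimit(Resource originalCapacity, Resource queueCapacity,
        float userLimitFactor, Resource effectiveMaxCapacity) {
      if (userLimitFactor == -1 || originalCapacity.equals(Resources.none())) {
        // Disabled factor, or a dynamic queue configured at 0% capacity:
        // do not let the cap degenerate to zero, use the queue's max instead.
        return effectiveMaxCapacity;
      }
      // Normal case: cap each user at capacity * user-limit-factor.
      return Resources.multiplyAndRoundDown(queueCapacity, userLimitFactor);
    }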
Signed-off-by: Mingliang Liu Reviewed-by: Steve Loughran --- .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index c6cf941cee7ab..057382bed9cde 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -4004,12 +4004,19 @@ public void cleanUp() { * Background action to act on references being removed. */ private static class StatisticsDataReferenceCleaner implements Runnable { + /** + * Represents the timeout period expires for remove reference objects from + * the STATS_DATA_REF_QUEUE when the queue is empty. + */ + private static final int REF_QUEUE_POLL_TIMEOUT = 10000; + @Override public void run() { while (!Thread.interrupted()) { try { StatisticsDataReference ref = - (StatisticsDataReference)STATS_DATA_REF_QUEUE.remove(); + (StatisticsDataReference)STATS_DATA_REF_QUEUE. + remove(REF_QUEUE_POLL_TIMEOUT); ref.cleanUp(); } catch (InterruptedException ie) { LOGGER.warn("Cleaner thread interrupted, will stop", ie); From 5066b30cced867becb9a24eaf65bf81fdf0de508 Mon Sep 17 00:00:00 2001 From: Hideyuki Furue Date: Wed, 2 Jun 2021 19:56:38 +0900 Subject: [PATCH 382/666] Fix container-executor Signed-off-by: Akira Ajisaka --- .../impl/container-executor.c | 130 +++++++++++++----- .../native/container-executor/impl/main.c | 4 +- .../container-executor/impl/runc/runc_reap.c | 6 +- .../native/container-executor/impl/util.c | 10 +- .../impl/utils/docker-util.c | 4 +- .../impl/utils/string-utils.c | 1 + .../test/test-container-executor.c | 41 ++++++ 7 files changed, 158 insertions(+), 38 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c index d69acf33abe10..ab03d3991dc7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c @@ -49,6 +49,10 @@ #include #include #include +#ifdef __linux +#include +#include +#endif #ifndef HAVE_FCHMODAT #include "compat/fchmodat.h" @@ -231,6 +235,19 @@ static int write_pid_to_cgroup_as_root(const char* cgroup_file, pid_t pid) { goto cleanup; } + // statfs + struct statfs buf; + if (statfs(cgroup_file, &buf) == -1) { + fprintf(LOGFILE, "Can't statfs file %s as node manager - %s\n", cgroup_file, + strerror(errno)); + rc = -1; + goto cleanup; + } else if (buf.f_type != CGROUP_SUPER_MAGIC) { + fprintf(LOGFILE, "Pid file %s is not located on cgroup filesystem\n", cgroup_file); + rc = -1; + goto cleanup; + } + // open int cgroup_fd = open(cgroup_file, O_WRONLY | O_APPEND, 0); if (cgroup_fd == -1) { @@ -524,27 +541,16 @@ int is_runc_support_enabled() { /** * Utility function to concatenate argB to argA using the concat_pattern. + * For historical reasons, redundant argument numArgs exists. */ char *concatenate(char *concat_pattern, char *return_path_name, int numArgs, ...) 
{ va_list ap; va_start(ap, numArgs); - int strlen_args = 0; - char *arg = NULL; - int j; - for (j = 0; j < numArgs; j++) { - arg = va_arg(ap, char*); - if (arg == NULL) { - fprintf(LOGFILE, "One of the arguments passed for %s is null.\n", - return_path_name); - return NULL; - } - strlen_args += strlen(arg); - } + int str_len = vsnprintf(NULL, 0, concat_pattern, ap) + 1; va_end(ap); char *return_path = NULL; - int str_len = strlen(concat_pattern) + strlen_args + 1; return_path = (char *) malloc(str_len); if (return_path == NULL) { @@ -785,6 +791,10 @@ static int create_container_directories(const char* user, const char *app_id, if (container_dir == NULL) { return OUT_OF_MEMORY; } + if (strstr(container_dir, "..") != 0) { + fprintf(LOGFILE, "Unsupported container directory path detected.\n"); + return COULD_NOT_CREATE_WORK_DIRECTORIES; + } if (mkdirs(container_dir, perms) == 0) { result = 0; } @@ -812,19 +822,26 @@ static int create_container_directories(const char* user, const char *app_id, char *container_log_dir = get_app_log_directory(*log_dir_ptr, combined_name); int check = check_nm_local_dir(nm_uid, *log_dir_ptr); if (check != 0) { - container_log_dir = NULL; - } - if (strstr(container_log_dir, "..") != 0) { - fprintf(LOGFILE, "Unsupported container log directory path detected.\n"); - container_log_dir = NULL; + free(container_log_dir); + free(combined_name); + return COULD_NOT_CREATE_APP_LOG_DIRECTORIES; } if (container_log_dir == NULL) { free(combined_name); return OUT_OF_MEMORY; + } + if (strstr(container_log_dir, "..") != 0) { + fprintf(LOGFILE, "Unsupported container log directory path detected.\n"); + free(container_log_dir); + free(combined_name); + return COULD_NOT_CREATE_APP_LOG_DIRECTORIES; } else if (mkdirs(container_log_dir, logdir_perms) != 0) { free(container_log_dir); } else { result = 0; + if (chosen_container_log_dir != NULL) { + free(chosen_container_log_dir); + } chosen_container_log_dir = strdup(container_log_dir); free(container_log_dir); } @@ -845,6 +862,12 @@ static int create_container_directories(const char* user, const char *app_id, return OUT_OF_MEMORY; } + if (strstr(tmp_dir, "..") != 0 || strstr(private_tmp_dir, "..") != 0 || strstr(private_var_tmp_dir, "..") != 0) { + fprintf(ERRORFILE, "Unsupported tmp directory path detected.\n"); + result = COULD_NOT_CREATE_TMP_DIRECTORIES; + goto cleanup; + } + if (mkdirs(tmp_dir, perms) != 0) { fprintf(ERRORFILE, "Could not create tmp_dir: %s\n", tmp_dir); result = COULD_NOT_CREATE_TMP_DIRECTORIES; @@ -857,7 +880,7 @@ static int create_container_directories(const char* user, const char *app_id, goto cleanup; } - // clear group sticky bit on private_tmp_dir + // clear setgid bit on private_tmp_dir if (chmod(private_tmp_dir, perms) != 0) { fprintf(ERRORFILE, "Could not chmod private_tmp_dir: %s\n", private_tmp_dir); result = COULD_NOT_CREATE_TMP_DIRECTORIES; @@ -870,7 +893,7 @@ static int create_container_directories(const char* user, const char *app_id, goto cleanup; } - // clear group sticky bit on private_tmp_dir + // clear setgid bit on private_tmp_dir if (chmod(private_var_tmp_dir, perms) != 0) { fprintf(ERRORFILE, "Could not chmod private_var_tmp_dir: %s\n", private_var_tmp_dir); result = COULD_NOT_CREATE_TMP_DIRECTORIES; @@ -1053,7 +1076,7 @@ static int change_owner(const char* path, uid_t user, gid_t group) { * return non-0 on failure */ int create_directory_for_user(const char* path) { - // set 2750 permissions and group sticky bit + // set 750 permissions and setgid bit mode_t permissions = S_IRWXU | S_IRGRP 
| S_IXGRP | S_ISGID; uid_t user = geteuid(); gid_t group = getegid(); @@ -1066,13 +1089,13 @@ int create_directory_for_user(const char* path) { if (ret == 0) { if (0 == mkdir(path, permissions) || EEXIST == errno) { - // need to reassert the group sticky bit + // need to reassert the setgid bit if (change_owner(path, user, nm_gid) != 0) { fprintf(LOGFILE, "Failed to chown %s to %d:%d: %s\n", path, user, nm_gid, strerror(errno)); ret = -1; } else if (chmod(path, permissions) != 0) { - fprintf(LOGFILE, "Can't chmod %s to add the sticky bit - %s\n", + fprintf(LOGFILE, "Can't chmod %s to add the setgid bit - %s\n", path, strerror(errno)); ret = -1; } @@ -1212,6 +1235,11 @@ int initialize_user(const char *user, char* const* local_dirs) { fprintf(LOGFILE, "Couldn't get userdir directory for %s.\n", user); failed = 1; break; + // Avoid possible wrong validation. Username can contain double dots. + } else if (strstr(user_dir, "/../") != 0) { + fprintf(LOGFILE, "Unsupported userdir directory path detected.\n"); + failed = 1; + break; } if (create_directory_for_user(user_dir) != 0) { failed = 1; @@ -1233,6 +1261,9 @@ int create_log_dirs(const char *app_id, char * const * log_dirs) { } if (app_log_dir == NULL) { // try the next one + } else if (strstr(app_log_dir, "..") != 0) { + fprintf(LOGFILE, "Unsupported app-log directory path detected.\n"); + free(app_log_dir); } else if (create_directory_for_user(app_log_dir) != 0) { free(app_log_dir); return -1; @@ -1301,7 +1332,11 @@ int create_container_log_dirs(const char *container_id, const char *app_id, } int result = check_nm_local_dir(nm_uid, *log_root); - if (result != 0 && container_log_dir != NULL) { + if (result != 0) { + free(container_log_dir); + container_log_dir = NULL; + continue; + } else if (strstr(container_log_dir, "..") != 0) { fprintf(LOGFILE, "Unsupported container log directory path (%s) detected.\n", container_log_dir); free(container_log_dir); @@ -1346,6 +1381,9 @@ static char *create_app_dirs(const char *user, char *app_dir = get_app_directory(*nm_root, user, app_id); if (app_dir == NULL) { // try the next one + } else if (strstr(app_dir, "..") != 0) { + fprintf(LOGFILE, "Unsupported app directory path detected.\n"); + free(app_dir); } else if (mkdirs(app_dir, permissions) != 0) { free(app_dir); } else if (primary_app_dir == NULL) { @@ -1412,7 +1450,7 @@ int initialize_app(const char *user, const char *app_id, char *nmPrivate_credentials_file_copy = strdup(nmPrivate_credentials_file); // TODO: FIXME. 
The user's copy of creds should go to a path selected by - // localDirAllocatoir + // localDirAllocator char *cred_file_name = concatenate("%s/%s", "cred file", 2, primary_app_dir, basename(nmPrivate_credentials_file_copy)); if (cred_file_name == NULL) { @@ -1742,14 +1780,14 @@ int create_script_paths(const char *work_dir, int exit_code = -1; *script_file_dest = get_container_launcher_file(work_dir); - if (script_file_dest == NULL) { + if (*script_file_dest == NULL) { exit_code = OUT_OF_MEMORY; fprintf(ERRORFILE, "Could not create script_file_dest\n"); return exit_code; } *cred_file_dest = get_container_credentials_file(work_dir); - if (NULL == cred_file_dest) { + if (NULL == *cred_file_dest) { exit_code = OUT_OF_MEMORY; fprintf(ERRORFILE, "Could not create cred_file_dest\n"); return exit_code; @@ -1757,13 +1795,13 @@ int create_script_paths(const char *work_dir, if (https == 1) { *keystore_file_dest = get_container_keystore_file(work_dir); - if (NULL == keystore_file_dest) { + if (NULL == *keystore_file_dest) { exit_code = OUT_OF_MEMORY; fprintf(ERRORFILE, "Could not create keystore_file_dest\n"); return exit_code; } *truststore_file_dest = get_container_truststore_file(work_dir); - if (NULL == truststore_file_dest) { + if (NULL == *truststore_file_dest) { exit_code = OUT_OF_MEMORY; fprintf(ERRORFILE, "Could not create truststore_file_dest\n"); return exit_code; @@ -1917,6 +1955,12 @@ int create_user_filecache_dirs(const char * user, char* const* local_dirs) { rc = INITIALIZE_USER_FAILED; break; } + if (strstr(filecache_dir, "..") != 0) { + fprintf(LOGFILE, "Unsupported filecache directory path detected.\n"); + free(filecache_dir); + rc = INITIALIZE_USER_FAILED; + break; + } if (0 != mkdir(filecache_dir, permissions) && EEXIST != errno) { fprintf(LOGFILE, "Failed to create directory %s - %s\n", filecache_dir, strerror(errno)); @@ -1941,6 +1985,12 @@ int create_yarn_sysfs(const char* user, const char *app_id, return OUT_OF_MEMORY; } char *yarn_sysfs_dir = make_string("%s/%s", container_dir, "sysfs"); + if (strstr(yarn_sysfs_dir, "..") != 0) { + fprintf(LOGFILE, "Unsupported yarn sysfs directory path detected.\n"); + free(yarn_sysfs_dir); + free(container_dir); + return OUT_OF_MEMORY; + } if (mkdir(yarn_sysfs_dir, perms) == 0) { result = 0; } @@ -2062,7 +2112,6 @@ int launch_docker_container_as_user(const char * user, const char *app_id, if (exit_code != 0) { fprintf(ERRORFILE, "Could not create user yarn sysfs directory\n"); exit(-1); - goto cleanup; } docker_command = construct_docker_command(command_file); @@ -2096,6 +2145,12 @@ int launch_docker_container_as_user(const char * user, const char *app_id, docker_command_with_binary = flatten(docker_command); + if (docker_command_with_binary == NULL) { + fprintf (ERRORFILE, "Could not flatten docker command.\n"); + exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT; + goto cleanup; + } + // Launch container pid_t child_pid = fork(); if (child_pid == -1) { @@ -2800,6 +2855,7 @@ int list_as_user(const char *target_dir) { strerror(errno)); ret = -1; } + closedir(dir); } else { fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir, strerror(errno)); @@ -2857,8 +2913,10 @@ int is_empty(char *target_dir) { continue; } fprintf(LOGFILE, "Directory is not empty %s\n", target_dir); + closedir(dir); return 0; } + closedir(dir); return 1; } @@ -2886,7 +2944,7 @@ int mount_cgroup(const char *pair, const char *hierarchy) { goto cleanup; } if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) { - fprintf(LOGFILE, "Unsupported cgroup hierarhy 
path detected.\n"); + fprintf(LOGFILE, "Unsupported cgroup hierarchy path detected.\n"); result = INVALID_COMMAND_PROVIDED; goto cleanup; } @@ -2907,8 +2965,13 @@ int mount_cgroup(const char *pair, const char *hierarchy) { result = INVALID_COMMAND_PROVIDED; goto cleanup; } + if (strlen(mount_path) + strlen(hierarchy) + 2 > EXECUTOR_PATH_MAX) { + fprintf(LOGFILE, "cgroup hierarchy path is too long.\n"); + result = INVALID_COMMAND_PROVIDED; + goto cleanup; + } if (mount("none", mount_path, "cgroup", 0, controller) == 0) { - char *buf = stpncpy(hier_path, mount_path, strlen(mount_path)); + char *buf = stpncpy(hier_path, mount_path, EXECUTOR_PATH_MAX); *buf++ = '/'; snprintf(buf, EXECUTOR_PATH_MAX - (buf - hier_path), "%s", hierarchy); @@ -3083,6 +3146,9 @@ char* flatten(char **args) { total = total + strlen(args[i]) + 1; } char *buffer = (char *) malloc(total * sizeof(char)); + if (buffer == NULL) { + return NULL; + } char *to = NULL; to = buffer; for (int i = 0; args[i] != NULL; i++) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c index ff59b96d23362..1b91e8a3d6cd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c @@ -68,8 +68,8 @@ static void display_usage(FILE *stream) { fprintf(stream, " container-executor \n" " where command and command-args: \n" \ - " initialize container: %2d appid tokens nm-local-dirs " - "nm-log-dirs cmd app...\n" + " initialize container: %2d appid containerid tokens nm-local-dirs " + "nm-log-dirs cmd...\n" " launch container: %2d appid containerid workdir " "container-script tokens http-option pidfile nm-local-dirs nm-log-dirs resources ", INITIALIZE_CONTAINER, LAUNCH_CONTAINER); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c index b67c60492fe16..ff5329b179670 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/runc/runc_reap.c @@ -446,6 +446,7 @@ static struct mntent* get_layer_mounts(size_t* num_entries_out, size_t num_entries = 0; size_t entries_capacity = num_entries_per_alloc; struct mntent* entries = malloc(sizeof(*entries) * entries_capacity); + struct mntent* new_entries; if (entries == NULL) { fputs("Unable to allocate memory\n", ERRORFILE); goto fail; @@ -484,11 +485,12 @@ static struct mntent* get_layer_mounts(size_t* num_entries_out, if (num_entries == entries_capacity) { entries_capacity += num_entries_per_alloc; - entries = realloc(entries, sizeof(*entries) * entries_capacity); - if (entries == NULL) { + new_entries = realloc(entries, sizeof(*entries) * entries_capacity); + if (new_entries == NULL) { fputs("Unable to allocate memory\n", ERRORFILE); goto fail; } + entries = new_entries; } if (!copy_mntent(entries + num_entries, me)) { diff 
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c index 9567ccc001485..c8ee7b461e67b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c @@ -25,6 +25,7 @@ char** split_delimiter(char *value, const char *delim) { char **return_values = NULL; + char **new_return_values; char *temp_tok = NULL; char *tempstr = NULL; int size = 0; @@ -60,8 +61,15 @@ char** split_delimiter(char *value, const char *delim) { // Make sure returned values has enough space for the trailing NULL. if (size >= return_values_size - 1) { return_values_size += per_alloc_size; - return_values = (char **) realloc(return_values,(sizeof(char *) * + new_return_values = (char **) realloc(return_values,(sizeof(char *) * return_values_size)); + if (!new_return_values) { + fprintf(ERRORFILE, "Reallocation error for return_values in %s.\n", + __func__); + failed = 1; + goto cleanup; + } + return_values = new_return_values; // Make sure new added memory are filled with NULL for (int i = size; i < return_values_size; i++) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c index 8bc66b30f6446..b81468aa259d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c @@ -1385,7 +1385,9 @@ static char* get_docker_mount_options_string(mount_options *options) { return NULL; } - idx += sprintf(options_string, "%s", options->opts[0]); + if (options->num_opts > 0) { + idx += sprintf(options_string, "%s", options->opts[0]); + } for (i = 1; i < options->num_opts; i++) { idx += sprintf(options_string + idx, ",%s", options->opts[i]); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c index 62d54a9ea62e4..e9e733d4ebadd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c @@ -181,6 +181,7 @@ char *make_string(const char *fmt, ...) 
{ int ret = vsnprintf(buf, buflen, fmt, vargs); va_end(vargs); if (ret < 0) { + free(buf); buf = NULL; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c index f75a5eafd69e0..f209ea53a0096 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c @@ -1322,6 +1322,46 @@ void test_trim_function() { free(trimmed); } +/** + * This test is used to verify that concatenate() works correctly + */ +void test_concatenate() { + char *concatenate(char *concat_pattern, char *return_path_name, int numArgs, ...); + printf("\nTesting concatenate function\n"); + + // numArgs: 0 + char *expected1 = "fixed1"; + char *actual1 = concatenate("fixed1", "test1", 0); + if (actual1 == NULL || strcmp(actual1, expected1) != 0) { + printf("FAIL: concatenate: test1: expected %s got %s\n", expected1, actual1); + exit(1); + } + + // numArgs: 1 + char *expected2 = "fixed1/var1"; + char *actual2 = concatenate("fixed1/%s", "test2", 1, "var1"); + if (actual2 == NULL || strcmp(actual2, expected2) != 0) { + printf("FAIL: concatenate: test2: expected %s got %s\n", expected2, actual2); + exit(1); + } + + // numArgs: 2 + char *expected3 = "fixed1/var1/fixed2/var2"; + char *actual3 = concatenate("fixed1/%s/fixed2/%s", "test3", 2, "var1", "var2"); + if (actual3 == NULL || strcmp(actual3, expected3) != 0) { + printf("FAIL: concatenate: test3: expected %s got %s\n", expected3, actual3); + exit(1); + } + + // concat_pattern with field width + char *expected4 = "[x ]"; + char *actual4 = concatenate("[%-10s]", "test4", 1, "x"); + if (actual4 == NULL || strcmp(actual4, expected4) != 0) { + printf("FAIL: concatenate: test4: expected %s got %s\n", expected4, actual4); + exit(1); + } +} + int is_empty(char *name); void test_is_empty() { @@ -1762,6 +1802,7 @@ int main(int argc, char **argv) { #endif test_trim_function(); + test_concatenate(); printf("\nFinished tests\n"); printf("\nAttempting to clean up from the run\n"); From c6a42be460c5274f7abb517d81ac632d09786932 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 7 Jun 2021 08:21:29 +0530 Subject: [PATCH 383/666] MAPREDUCE-7350. 
Replace Guava Lists usage by Hadoop's own Lists in hadoop-mapreduce-project (#3074) --- .../hadoop/mapreduce/v2/app/MockJobs.java | 2 +- .../counters/CounterGroupFactory.java | 2 +- .../mapreduce/lib/input/FileInputFormat.java | 5 ++- .../mapreduce/util/CountersStrings.java | 3 +- .../hadoop/mapred/TestFileInputFormat.java | 3 +- .../lib/input/TestFileInputFormat.java | 3 +- .../mapred/nativetask/kvtest/KVTest.java | 2 +- .../uploader/TestFrameworkUploader.java | 2 +- .../hadoop-mapreduce-client/pom.xml | 32 +++++++++++++++++++ .../hadoop-mapreduce-examples/pom.xml | 32 +++++++++++++++++++ 10 files changed, 73 insertions(+), 13 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index 586d86aee1d93..38ceeada62d5c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -61,6 +61,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -70,7 +71,6 @@ import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class MockJobs extends MockApps { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java index 278d0a73a0cbd..8e0c864586384 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; @@ -30,6 +29,7 @@ import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.mapreduce.util.ResourceBundles; +import org.apache.hadoop.util.Lists; /** * An abstract class to provide common implementation of the diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java index 
a71aaade013c0..5b0f88f868c70 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java @@ -41,15 +41,14 @@ import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.security.TokenCache; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - -/** +/** * A base class for file-based {@link InputFormat}s. * *
    FileInputFormat is the base class for all file-based diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java index 9e32e70ede6a4..429425ad2d558 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java @@ -21,13 +21,12 @@ import java.text.ParseException; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.mapreduce.counters.AbstractCounters; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.counters.CounterGroupBase; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java index 0bf29a500661f..3f3cb24a1a5aa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.RawLocalFileSystem; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -43,8 +44,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - @RunWith(value = Parameterized.class) public class TestFileInputFormat { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java index 52b987aebb1c7..8103ce8234177 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java @@ -43,6 +43,7 @@ import org.apache.hadoop.mapred.SplitLocationInfo; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.junit.After; import org.junit.Assert; @@ -54,8 +55,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - @RunWith(value = Parameterized.class) public 
class TestFileInputFormat { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java index cee7675a4047f..0771c669148ee 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapred.nativetask.testutil.ResultVerifier; import org.apache.hadoop.mapred.nativetask.testutil.ScenarioConfiguration; import org.apache.hadoop.mapred.nativetask.testutil.TestConstants; +import org.apache.hadoop.util.Lists; import org.junit.AfterClass; import org.apache.hadoop.util.NativeCodeLoader; import org.junit.Assume; @@ -42,7 +43,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @RunWith(Parameterized.class) public class KVTest { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java index ed4d9ce68c3c3..7749db1be846d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java @@ -18,7 +18,6 @@ package org.apache.hadoop.mapred.uploader; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.apache.commons.io.FileUtils; @@ -32,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Assume; import org.junit.Before; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 55940ae4ff744..112a2c73e8ba8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -177,6 +177,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index 48cf27efe437f..c4f89acb41f4f 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -155,6 +155,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + From c272b2af537c07f8d29fa1bac7695db5b2231b06 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 7 Jun 2021 09:54:09 +0530 Subject: [PATCH 384/666] HADOOP-17743. Replace Guava Lists usage by Hadoop's own Lists in hadoop-common, hadoop-tools and cloud-storage projects (#3072) --- .../hadoop-cloud-storage/pom.xml | 37 +++++++++++++++++++ .../hadoop-cos/pom.xml | 32 ++++++++++++++++ .../hadoop-huaweicloud/pom.xml | 32 ++++++++++++++++ .../apache/hadoop/fs/obs/OBSCommonUtils.java | 2 +- hadoop-common-project/hadoop-common/pom.xml | 32 ++++++++++++++++ .../org/apache/hadoop/crypto/CryptoCodec.java | 2 +- .../hadoop/fs/permission/AclStatus.java | 2 +- .../apache/hadoop/fs/permission/AclUtil.java | 2 +- .../apache/hadoop/fs/shell/AclCommands.java | 3 +- .../java/org/apache/hadoop/ha/NodeFencer.java | 2 +- .../org/apache/hadoop/http/HttpServer2.java | 2 +- .../metrics2/impl/MBeanInfoBuilder.java | 3 +- .../metrics2/impl/MetricsCollectorImpl.java | 3 +- .../impl/MetricsRecordBuilderImpl.java | 3 +- .../metrics2/impl/MetricsSystemImpl.java | 2 +- .../apache/hadoop/metrics2/util/Servers.java | 3 +- .../apache/hadoop/util/ChunkedArrayList.java | 1 - .../apache/hadoop/util/JvmPauseMonitor.java | 1 - .../java/org/apache/hadoop/util/ZKUtil.java | 1 - .../hadoop/conf/TestReconfiguration.java | 2 +- .../org/apache/hadoop/ha/DummyHAService.java | 2 +- .../org/apache/hadoop/ha/TestNodeFencer.java | 3 +- .../hadoop/ha/TestShellCommandFencer.java | 6 +-- .../util/TestApplicationClassLoader.java | 1 - .../hadoop/util/TestDirectBufferPool.java | 2 - hadoop-common-project/hadoop-registry/pom.xml | 33 ++++++++++++++++- .../client/impl/zk/RegistrySecurity.java | 2 +- hadoop-tools/hadoop-aws/pom.xml | 32 ++++++++++++++++ .../org/apache/hadoop/fs/s3a/S3AUtils.java | 2 +- .../hadoop/fs/s3a/auth/RolePolicies.java | 2 +- .../hadoop/fs/s3a/impl/RenameOperation.java | 2 +- .../s3a/s3guard/DumpS3GuardDynamoTable.java | 5 +-- .../fs/s3a/s3guard/DynamoDBMetadataStore.java | 2 +- .../fs/s3a/ITestS3AFailureHandling.java | 2 +- .../fs/s3a/ITestS3GuardListConsistency.java | 2 +- .../fs/s3a/commit/ITestCommitOperations.java | 2 +- .../fs/s3a/commit/TestMagicCommitPaths.java | 2 +- .../s3a/commit/staging/StagingTestBase.java | 2 +- .../TestStagingPartitionedFileListing.java | 2 +- .../TestStagingPartitionedTaskCommit.java | 2 +- .../s3a/impl/TestPartialDeleteFailures.java | 2 +- .../s3guard/ITestDynamoDBMetadataStore.java | 2 +- .../s3a/s3guard/TestPathOrderComparators.java | 12 +++--- hadoop-tools/hadoop-azure/pom.xml | 33 +++++++++++++++++ .../ITestAbfsIdentityTransformer.java | 2 +- .../ITestAzureBlobFileSystemCheckAccess.java | 2 +- .../azurebfs/ITestAzureBlobFilesystemAcl.java | 2 +- .../fs/azurebfs/ITestCustomerProvidedKey.java | 2 +- hadoop-tools/hadoop-distcp/pom.xml | 32 ++++++++++++++++ .../hadoop/tools/CopyListingFileStatus.java | 2 +- .../hadoop/tools/SimpleCopyListing.java | 2 +- .../hadoop/tools/util/TestDistCpUtils.java | 2 +- .../hadoop-dynamometer-infra/pom.xml | 
32 ++++++++++++++++ .../tools/dynamometer/ApplicationMaster.java | 2 +- .../hadoop/tools/dynamometer/Client.java | 2 +- .../hadoop-dynamometer-workload/pom.xml | 32 ++++++++++++++++ .../workloadgenerator/CreateFileMapper.java | 2 +- .../audit/AuditReplayMapper.java | 2 +- hadoop-tools/hadoop-kafka/pom.xml | 32 ++++++++++++++++ .../metrics2/impl/TestKafkaMetrics.java | 2 +- 60 files changed, 411 insertions(+), 65 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml index a8f45a7f3a222..699ce1abfc692 100644 --- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml @@ -32,6 +32,43 @@ cloud-storage + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + + + + org.apache.hadoop diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml index fa47e354c7998..b1f9ccb6e3e04 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml @@ -96,6 +96,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml index 43360c11cd9d2..9386152c5dd7d 100755 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml @@ -92,6 +92,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSCommonUtils.java b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSCommonUtils.java index ba7550bc0d647..d477cec186b0e 100644 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSCommonUtils.java +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/main/java/org/apache/hadoop/fs/obs/OBSCommonUtils.java @@ -19,7 +19,6 @@ package org.apache.hadoop.fs.obs; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.obs.services.ObsClient; import com.obs.services.exception.ObsException; import com.obs.services.model.AbortMultipartUploadRequest; @@ -53,6 +52,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIOException; import 
org.apache.hadoop.security.ProviderUtils; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 249471cc8568d..c234daa46d001 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -662,6 +662,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 111e91b5c969e..64c754faa59d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -26,13 +26,13 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.PerformanceAdvisory; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java index 674b88083d3dc..d05f9550c59ce 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java @@ -24,7 +24,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; /** * An AclStatus contains the ACL information of a specific file. AclStatus diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java index 58b24f200429b..1447e80f3eb2b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; /** * AclUtil contains utility methods for manipulating ACLs. 
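The hunks above are representative of the whole HADOOP-17743 change: only the import line moves, while call sites keep the same Lists factory names. A minimal before/after sketch, using a hypothetical class name and assuming hadoop-common is on the classpath (the varargs Lists.newArrayList call mirrors usages visible later in this patch, e.g. in TestPathOrderComparators):

// Hypothetical example class; only the import differs from the pre-patch form.
// Before the patch:
//   import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
// After the patch:
import org.apache.hadoop.util.Lists;

import java.util.List;

public class ListsMigrationSketch {
  // Same factory name and call shape as the Guava version, now provided by hadoop-common.
  public static List<String> defaultAclScopes() {
    return Lists.newArrayList("user", "group", "mask", "other");
  }

  public static void main(String[] args) {
    System.out.println(defaultAclScopes());   // prints [user, group, mask, other]
  }
}

Per the commit subjects, the point of the swap is to drop direct Guava Lists usage from Hadoop's own modules while keeping the familiar newArrayList-style factories, so call sites need no behavioural change.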
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java index dcff0094eccf5..7a8a9a24da625 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java @@ -22,8 +22,6 @@ import java.util.LinkedList; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -35,6 +33,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.ScopedAclEntries; +import org.apache.hadoop.util.Lists; /** * Acl related operations diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java index 7f4a0790a3bc1..fb78a4c47dcde 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java @@ -25,10 +25,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 9f7562d35aa2c..9f81eed76a730 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -57,7 +57,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.servlet.ServletContainer; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -82,6 +81,7 @@ import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; import org.apache.hadoop.security.ssl.FileMonitoringTimerTask; import org.apache.hadoop.security.ssl.SSLFactory; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java index a297072d236d4..b32cbdca839b2 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java @@ -22,12 +22,11 @@ import javax.management.MBeanAttributeInfo; import javax.management.MBeanInfo; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.MetricsVisitor; +import org.apache.hadoop.util.Lists; /** * Helper class to build MBeanInfo from metrics records diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java index 4b4b70bd8e607..cce55d4368198 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java @@ -22,12 +22,13 @@ import java.util.List; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsFilter; +import org.apache.hadoop.util.Lists; + import static org.apache.hadoop.metrics2.lib.Interns.*; @InterfaceAudience.Private diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java index 19e4c3b6d4187..ef0f2b2a14f93 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java @@ -21,8 +21,6 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsCollector; @@ -30,6 +28,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.lib.Interns; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index a6edf08e5a717..535ee914ef870 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -30,7 +30,6 @@ import java.util.TimerTask; import javax.management.ObjectName; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; 
import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; @@ -58,6 +57,7 @@ import org.apache.hadoop.metrics2.lib.MetricsSourceBuilder; import org.apache.hadoop.metrics2.lib.MutableStat; import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java index 2bd49e9f211ba..e8d32876f1c08 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java @@ -22,11 +22,10 @@ import java.net.InetSocketAddress; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.Lists; /** * Helpers to handle server addresses diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java index ff7197ce52e4d..55b75634cb68d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java @@ -26,7 +26,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Simplified List implementation which stores elements as a list diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java index a792993493bca..feb4f9b9d3f01 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java @@ -29,7 +29,6 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java index 8e4e67d1b61e0..6d38c606c8c4c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java @@ -29,7 +29,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.io.Files; /** 
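The maven-enforcer executions repeated in each module's pom.xml back this migration up at build time: the banned-illegal-imports execution runs the de.skuzzle restrict-imports enforcer rule during process-sources and fails the build if a Guava Lists import is reintroduced. A rough source-level sketch of what the rule rejects and accepts, with an illustrative class name:

// Illustrative class; summarises the bannedImport entries added in the pom.xml hunks above.
//
// Rejected (build fails at process-sources with the configured reason
// "Use hadoop-common provided Lists rather than Guava provided Lists"):
//   import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
//   import com.google.common.collect.Lists;
//
// Accepted replacement:
import org.apache.hadoop.util.Lists;

import java.util.List;

class BannedImportSketch {
  List<Integer> sample() {
    return Lists.newArrayList(1, 2, 3);   // Hadoop-provided class, same factory name
  }
}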
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index 4948df9b1f4cb..0216551ad9822 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -19,8 +19,8 @@ package org.apache.hadoop.conf; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java index b5739f7935ed7..7cb2ab1318bf8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java @@ -33,9 +33,9 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; import org.mockito.Mockito; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java index 972113eefa91f..be67848e2120a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java @@ -24,13 +24,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Shell; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - public class TestNodeFencer { private HAServiceTarget MOCK_TARGET; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java index dcff9e30cdba2..88afb35a8dd9a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java @@ -21,9 +21,9 @@ import java.lang.reflect.Method; import java.net.InetSocketAddress; +import java.util.Arrays; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.util.Shell; @@ -223,8 +223,8 @@ public void testCommandAbbreviation() { */ private static class LogAnswer implements Answer { - private static final List DELEGATE_METHODS = Lists.asList("error", - new String[]{"warn", "info", "debug", "trace"}); + private static final List DELEGATE_METHODS = Arrays.asList( + 
"error", "warn", "info", "debug", "trace"); @Override public Object answer(InvocationOnMock invocation) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java index 0fb887676274a..4be74ba8e7e97 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java @@ -42,7 +42,6 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestApplicationClassLoader { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java index d6da2f86cc3c4..592f40aa16c2d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java @@ -26,8 +26,6 @@ import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - public class TestDirectBufferPool { final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool(); diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index 0058832e6110b..8e8b1e064e7e7 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -260,7 +260,38 @@ - + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java index 065cbe3296b09..945381022b127 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java @@ -20,7 +20,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -29,6 +28,7 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.ServiceStateException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ZKUtil; import org.apache.zookeeper.Environment; import org.apache.zookeeper.ZooDefs; diff --git a/hadoop-tools/hadoop-aws/pom.xml 
b/hadoop-tools/hadoop-aws/pom.xml index 0cab5ada2169d..414e69780ea6c 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -467,6 +467,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index b6af8e7f27c20..220355aaa3f33 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -54,7 +54,7 @@ import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java index 7b632e2d90a7b..22ced94f6dacc 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java @@ -23,7 +23,7 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java index 7b13d0d3c7c42..efc789ba322fa 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java @@ -28,7 +28,7 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.transfer.model.CopyResult; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java index 2a7cb4c1b7247..e4c9ef6de8ba3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java @@ -35,7 +35,6 @@ import java.util.List; import com.amazonaws.services.dynamodbv2.xspec.ExpressionSpecBuilder; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -265,8 +264,8 @@ public int execute() throws ServiceLaunchException, IOException { * @param type of queue */ private void 
pushAll(Deque queue, List entries) { - List reversed = Lists.reverse(entries); - for (T t : reversed) { + Collections.reverse(entries); + for (T t : entries) { queue.push(t); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index e4542eb1bcfa6..b113d2017829a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -67,7 +67,7 @@ import org.apache.hadoop.fs.s3a.impl.InternalConstants; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.slf4j.Logger; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java index 71a5794371107..e395207589812 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java @@ -20,7 +20,7 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.assertj.core.api.Assertions; import org.junit.Assume; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java index 09f66df4c2ec0..17dc450707016 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java @@ -32,7 +32,7 @@ import org.apache.hadoop.fs.contract.s3a.S3AContract; import com.amazonaws.services.s3.model.S3ObjectSummary; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index e8c5d888d10dc..2bc6434ccd494 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -25,7 +25,7 @@ import java.util.List; import com.amazonaws.services.s3.model.PartETag; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.assertj.core.api.Assertions; import org.junit.Test; import org.slf4j.Logger; diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java index 073922cbc0e5f..fdc4ec8058a6c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java @@ -22,7 +22,7 @@ import java.util.Arrays; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java index 4e425583a3cfd..6e13fd0227a3b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java @@ -44,7 +44,7 @@ import com.amazonaws.services.s3.model.MultipartUploadListing; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.junit.AfterClass; import org.junit.Assert; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java index ce55480323872..76a0de225371e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java @@ -28,7 +28,7 @@ import java.util.UUID; import java.util.stream.Collectors; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java index 9e98a226ef150..fb252102491d6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java @@ -25,7 +25,7 @@ import java.util.UUID; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.assertj.core.api.Assertions; import org.junit.BeforeClass; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java index 0d9ba1d304490..f43860e1e8b36 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java @@ -28,7 +28,7 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java index 580386a09f6b0..93a9e33aaaf48 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java @@ -46,7 +46,7 @@ import com.amazonaws.services.dynamodbv2.model.Tag; import com.amazonaws.services.dynamodbv2.model.TagResourceRequest; import com.amazonaws.services.dynamodbv2.model.UntagResourceRequest; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.assertj.core.api.Assertions; import org.apache.commons.collections.CollectionUtils; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java index 9b8e5918efee1..03233df69e198 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java @@ -21,11 +21,11 @@ import java.util.Comparator; import java.util.List; +import org.apache.hadoop.util.Lists; import org.junit.Test; import org.apache.hadoop.fs.Path; -import static org.apache.hadoop.thirdparty.com.google.common.collect.Lists.newArrayList; import static org.apache.hadoop.fs.s3a.s3guard.PathOrderComparators.TOPMOST_PATH_FIRST; import static org.apache.hadoop.fs.s3a.s3guard.PathOrderComparators.TOPMOST_PATH_LAST; import static org.assertj.core.api.Assertions.assertThat; @@ -119,13 +119,13 @@ public void testSortOrderConstant() throws Throwable { List sort1 = verifySorted(ROOT, DIR_A, DIR_B, DIR_A_FILE_1, DIR_A_FILE_2, DIR_B_FILE_3, DIR_B_FILE_4); - List sort2 = newArrayList(sort1); + List sort2 = Lists.newArrayList(sort1); assertSortsTo(sort2, sort1, true); } @Test public void testSortReverse() throws Throwable { - List sort1 = newArrayList( + List sort1 = Lists.newArrayList( ROOT, DIR_A, DIR_B, @@ -133,7 +133,7 @@ public void testSortReverse() throws Throwable { DIR_A_FILE_2, DIR_B_FILE_3, DIR_B_FILE_4); - List expected = newArrayList( + List expected = Lists.newArrayList( DIR_B_FILE_4, DIR_B_FILE_3, DIR_A_FILE_2, @@ -146,8 +146,8 @@ public void testSortReverse() throws Throwable { private List verifySorted(Path... 
paths) { - List original = newArrayList(paths); - List sorted = newArrayList(paths); + List original = Lists.newArrayList(paths); + List sorted = Lists.newArrayList(paths); assertSortsTo(original, sorted, true); return sorted; } diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index d5d0f1fcd7303..59aae51e60788 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -101,6 +101,39 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + + diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java index f0473789cf161..5868d083e12e9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer; import org.apache.hadoop.fs.permission.AclEntry; import org.junit.Test; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index e52071d92e574..ebd64812d45b9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -22,7 +22,7 @@ import java.lang.reflect.Field; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.junit.Assume; import org.junit.Test; import org.mockito.Mockito; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java index 74cf02a4f1f68..245ae846e3682 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azurebfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.FileNotFoundException; import java.util.List; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java index 9229905b4623c..11165c8ceb723 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 7e5aaebc08513..8ff7ebb5a5f1c 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -223,6 +223,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java index 02c623157d8a1..b4a74405c4f7e 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java @@ -41,7 +41,7 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Objects; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java index fb7ace5f1ead5..2e0319867d5f1 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java index 7d17167c3e8df..0a1f88e378586 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java @@ -43,7 +43,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml index d6e3d4b228d91..4b4367c6e0e05 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml @@ -137,6 +137,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + 
+ de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java index 094721b98d58a..e44f811f0db41 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools.dynamometer; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java index 3c8baec15c74f..1731780af9cd0 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java @@ -21,7 +21,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.util.Optional; import java.util.function.Supplier; import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml index eb54de36d4d68..bd016a8bff414 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml @@ -83,6 +83,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java index 64b8dc28e67d0..318b47d19ff6f 100644 --- 
a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer.workloadgenerator; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.IOException; import java.io.OutputStream; import java.net.URI; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java index c46f720bd0a87..fdd8f20d85bd5 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer.workloadgenerator.audit; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.util.Optional; import java.util.function.Function; diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml index d8c01eda23bee..2b021643fe512 100644 --- a/hadoop-tools/hadoop-kafka/pom.xml +++ b/hadoop-tools/hadoop-kafka/pom.xml @@ -70,6 +70,38 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java index fb19172359a5f..03c479fba59c5 100644 --- a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java +++ b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.impl; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricType; From 8252b74cf4fca8497e50301c5da4fd30817121fd Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 7 Jun 2021 14:37:30 +0900 Subject: [PATCH 385/666] HDFS-16050. Some dynamometer tests fail. 
(#3079) Signed-off-by: Takanobu Asanuma --- .../hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml | 2 +- .../hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml | 2 +- .../hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml index cb593e73df17d..2b8c4294066f3 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml @@ -36,7 +36,7 @@ org.mockito - mockito-all + mockito-core test diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml index 4b4367c6e0e05..1e30247cbb1e9 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml @@ -74,7 +74,7 @@ org.mockito - mockito-all + mockito-core test diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml index bd016a8bff414..cb7000555ee66 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml @@ -39,6 +39,11 @@ junit test + + org.mockito + mockito-core + test + org.apache.hadoop hadoop-minicluster From 9e32d1608df1a8441f77f171edc946fed8054b05 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 8 Jun 2021 08:41:08 +0530 Subject: [PATCH 386/666] HADOOP-17727. Modularize docker images (#3043) Reviewed-by: Inigo Goiri --- dev-support/bin/create-release | 2 +- dev-support/docker/Dockerfile | 149 ++------- dev-support/docker/Dockerfile_aarch64 | 119 ++----- dev-support/docker/Dockerfile_centos_7 | 151 ++------- dev-support/docker/Dockerfile_centos_8 | 144 ++------- dev-support/docker/README.md | 74 +++++ .../docker/pkg-resolver/check_platform.py | 50 +++ .../docker/pkg-resolver/install-boost.sh | 56 ++++ .../docker/pkg-resolver/install-cmake.sh | 53 +++ .../pkg-resolver/install-common-pkgs.sh | 28 ++ .../docker/pkg-resolver/install-epel.sh | 49 +++ .../docker/pkg-resolver/install-hadolint.sh | 35 ++ .../pkg-resolver/install-intel-isa-l.sh | 58 ++++ .../docker/pkg-resolver/install-maven.sh | 49 +++ .../docker/pkg-resolver/install-nodejs.sh | 54 ++++ .../docker/pkg-resolver/install-protobuf.sh | 57 ++++ .../docker/pkg-resolver/install-spotbugs.sh | 50 +++ .../docker/pkg-resolver/install-yasm.sh | 49 +++ .../docker/pkg-resolver/install-zstandard.sh | 53 +++ dev-support/docker/pkg-resolver/packages.json | 302 ++++++++++++++++++ .../docker/pkg-resolver/platforms.json | 6 + dev-support/docker/pkg-resolver/resolve.py | 61 ++++ 22 files changed, 1190 insertions(+), 459 deletions(-) create mode 100644 dev-support/docker/README.md create mode 100644 dev-support/docker/pkg-resolver/check_platform.py create mode 100644 dev-support/docker/pkg-resolver/install-boost.sh create mode 100644 dev-support/docker/pkg-resolver/install-cmake.sh create mode 100644 dev-support/docker/pkg-resolver/install-common-pkgs.sh create mode 100644 dev-support/docker/pkg-resolver/install-epel.sh create mode 100644 dev-support/docker/pkg-resolver/install-hadolint.sh create mode 100644 dev-support/docker/pkg-resolver/install-intel-isa-l.sh create mode 100644 dev-support/docker/pkg-resolver/install-maven.sh create 
mode 100644 dev-support/docker/pkg-resolver/install-nodejs.sh create mode 100644 dev-support/docker/pkg-resolver/install-protobuf.sh create mode 100644 dev-support/docker/pkg-resolver/install-spotbugs.sh create mode 100644 dev-support/docker/pkg-resolver/install-yasm.sh create mode 100644 dev-support/docker/pkg-resolver/install-zstandard.sh create mode 100644 dev-support/docker/pkg-resolver/packages.json create mode 100644 dev-support/docker/pkg-resolver/platforms.json create mode 100644 dev-support/docker/pkg-resolver/resolve.py diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release index 39a5d0d319837..31ae6ee1b0659 100755 --- a/dev-support/bin/create-release +++ b/dev-support/bin/create-release @@ -514,7 +514,7 @@ function dockermode echo "USER ${user_name}" printf "\n\n" - ) | docker build -t "${imgname}" - + ) | docker build -t "${imgname}" -f - "${BASEDIR}"/dev-support/docker/ run docker run -i -t \ --privileged \ diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index ef2f1562613e8..fac364bbd4363 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -32,56 +32,20 @@ RUN echo APT::Install-Suggests "0"\; >> /etc/apt/apt.conf.d/10disableextras ENV DEBIAN_FRONTEND noninteractive ENV DEBCONF_TERSE true -# hadolint ignore=DL3008 +###### +# Platform package dependency resolver +###### +COPY pkg-resolver pkg-resolver +RUN chmod a+x pkg-resolver/*.sh pkg-resolver/*.py \ + && chmod a+r pkg-resolver/*.json + +###### +# Install packages from apt +###### +# hadolint ignore=DL3008,SC2046 RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends \ - ant \ - apt-utils \ - bats \ - build-essential \ - bzip2 \ - clang \ - cmake \ - curl \ - doxygen \ - fuse \ - g++ \ - gcc \ - git \ - gnupg-agent \ - hugo \ - libbcprov-java \ - libbz2-dev \ - libcurl4-openssl-dev \ - libfuse-dev \ - libprotobuf-dev \ - libprotoc-dev \ - libsasl2-dev \ - libsnappy-dev \ - libssl-dev \ - libtool \ - libzstd-dev \ - locales \ - make \ - maven \ - nodejs \ - node-yarn \ - npm \ - openjdk-11-jdk \ - openjdk-8-jdk \ - pinentry-curses \ - pkg-config \ - python3 \ - python3-pip \ - python3-pkg-resources \ - python3-setuptools \ - python3-wheel \ - rsync \ - shellcheck \ - software-properties-common \ - sudo \ - valgrind \ - zlib1g-dev \ + && apt-get -q install -y --no-install-recommends python3 \ + && apt-get -q install -y --no-install-recommends $(pkg-resolver/resolve.py ubuntu:focal) \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* @@ -97,89 +61,16 @@ ENV MAVEN_HOME /usr ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 ####### -# Install SpotBugs 4.2.2 +# Set env vars for SpotBugs 4.2.2 ####### -RUN mkdir -p /opt/spotbugs \ - && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ - -o /opt/spotbugs.tgz \ - && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ - && chmod +x /opt/spotbugs/bin/* ENV SPOTBUGS_HOME /opt/spotbugs ####### -# Install Boost 1.72 (1.71 ships with Focal) +# Set env vars for Google Protobuf 3.7.1 ####### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/boost-library \ - && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ - && mv boost_1_72_0.tar.bz2 /opt/boost-library \ - && cd /opt/boost-library \ - && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ - && cd /opt/boost-library/boost_1_72_0 \ - && ./bootstrap.sh --prefix=/usr/ \ - && ./b2 --without-python install \ - && cd /root \ - && rm -rf 
/opt/boost-library - -###### -# Install Google Protobuf 3.7.1 (3.6.1 ships with Focal) -###### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/protobuf-src \ - && curl -L -s -S \ - https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ - -o /opt/protobuf.tar.gz \ - && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ - && cd /opt/protobuf-src \ - && ./configure --prefix=/opt/protobuf \ - && make "-j$(nproc)" \ - && make install \ - && cd /root \ - && rm -rf /opt/protobuf-src ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -#### -# Install pylint and python-dateutil -#### -RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 - -#### -# Install bower -#### -# hadolint ignore=DL3008 -RUN npm install -g bower@1.8.8 - -### -# Install hadolint -#### -RUN curl -L -s -S \ - https://github.com/hadolint/hadolint/releases/download/v1.11.1/hadolint-Linux-x86_64 \ - -o /bin/hadolint \ - && chmod a+rx /bin/hadolint \ - && shasum -a 512 /bin/hadolint | \ - awk '$1!="734e37c1f6619cbbd86b9b249e69c9af8ee1ea87a2b1ff71dccda412e9dac35e63425225a95d71572091a3f0a11e9a04c2fc25d9e91b840530c26af32b9891ca" {exit(1)}' - -###### -# Intel ISA-L 2.29.0 -###### -# hadolint ignore=DL3003,DL3008 -RUN mkdir -p /opt/isa-l-src \ - && apt-get -q update \ - && apt-get install -y --no-install-recommends automake yasm \ - && apt-get clean \ - && curl -L -s -S \ - https://github.com/intel/isa-l/archive/v2.29.0.tar.gz \ - -o /opt/isa-l.tar.gz \ - && tar xzf /opt/isa-l.tar.gz --strip-components 1 -C /opt/isa-l-src \ - && cd /opt/isa-l-src \ - && ./autogen.sh \ - && ./configure \ - && make "-j$(nproc)" \ - && make install \ - && cd /root \ - && rm -rf /opt/isa-l-src - ### # Avoid out of memory errors in builds ### @@ -188,6 +79,16 @@ ENV MAVEN_OPTS -Xms256m -Xmx3072m # Skip gpg verification when downloading Yetus via yetus-wrapper ENV HADOOP_SKIP_YETUS_VERIFICATION true +#### +# Install packages +#### +RUN pkg-resolver/install-common-pkgs.sh +RUN pkg-resolver/install-spotbugs.sh ubuntu:focal +RUN pkg-resolver/install-boost.sh ubuntu:focal +RUN pkg-resolver/install-protobuf.sh ubuntu:focal +RUN pkg-resolver/install-hadolint.sh ubuntu:focal +RUN pkg-resolver/install-intel-isa-l.sh ubuntu:focal + ### # Everything past this point is either not needed for testing or breaks Yetus. # So tell Yetus not to read the rest of the file: diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 5c2613268a65b..dd0348961f464 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -33,61 +33,19 @@ ENV DEBIAN_FRONTEND noninteractive ENV DEBCONF_TERSE true ###### -# Install common dependencies from packages. Versions here are either -# sufficient or irrelevant. 
+# Platform package dependency resolver ###### -# hadolint ignore=DL3008 +COPY pkg-resolver pkg-resolver +RUN chmod a+x pkg-resolver/*.sh pkg-resolver/*.py \ + && chmod a+r pkg-resolver/*.json + +###### +# Install packages from apt +###### +# hadolint ignore=DL3008,SC2046 RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends \ - ant \ - apt-utils \ - bats \ - build-essential \ - bzip2 \ - clang \ - cmake \ - curl \ - doxygen \ - fuse \ - g++ \ - gcc \ - git \ - gnupg-agent \ - hugo \ - libbcprov-java \ - libbz2-dev \ - libcurl4-openssl-dev \ - libfuse-dev \ - libprotobuf-dev \ - libprotoc-dev \ - libsasl2-dev \ - libsnappy-dev \ - libssl-dev \ - libtool \ - libzstd-dev \ - locales \ - make \ - maven \ - nodejs \ - node-yarn \ - npm \ - openjdk-11-jdk \ - openjdk-8-jdk \ - phantomjs \ - pinentry-curses \ - pkg-config \ - python2.7 \ - python3 \ - python3-pip \ - python3-pkg-resources \ - python3-setuptools \ - python3-wheel \ - rsync \ - shellcheck \ - software-properties-common \ - sudo \ - valgrind \ - zlib1g-dev \ + && apt-get -q install -y --no-install-recommends python3 \ + && apt-get -q install -y --no-install-recommends $(pkg-resolver/resolve.py ubuntu:focal::arch64) \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* @@ -103,59 +61,16 @@ ENV MAVEN_HOME /usr ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 ####### -# Install SpotBugs 4.2.2 +# Set env vars for SpotBugs 4.2.2 ####### -RUN mkdir -p /opt/spotbugs \ - && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ - -o /opt/spotbugs.tgz \ - && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ - && chmod +x /opt/spotbugs/bin/* ENV SPOTBUGS_HOME /opt/spotbugs ####### -# Install Boost 1.72 (1.71 ships with Focal) +# Set env vars for Google Protobuf 3.7.1 ####### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/boost-library \ - && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ - && mv boost_1_72_0.tar.bz2 /opt/boost-library \ - && cd /opt/boost-library \ - && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ - && cd /opt/boost-library/boost_1_72_0 \ - && ./bootstrap.sh --prefix=/usr/ \ - && ./b2 --without-python install \ - && cd /root \ - && rm -rf /opt/boost-library - -###### -# Install Google Protobuf 3.7.1 (3.6.1 ships with Focal) -###### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/protobuf-src \ - && curl -L -s -S \ - https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ - -o /opt/protobuf.tar.gz \ - && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ - && cd /opt/protobuf-src \ - && ./configure --prefix=/opt/protobuf \ - && make "-j$(nproc)" \ - && make install \ - && cd /root \ - && rm -rf /opt/protobuf-src ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -#### -# Install pylint and python-dateutil -#### -RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 - -#### -# Install bower -#### -# hadolint ignore=DL3008 -RUN npm install -g bower@1.8.8 - ### # Avoid out of memory errors in builds ### @@ -167,6 +82,14 @@ ENV HADOOP_SKIP_YETUS_VERIFICATION true # Force PhantomJS to be in 'headless' mode, do not connect to Xwindow ENV QT_QPA_PLATFORM offscreen +#### +# Install packages +#### +RUN pkg-resolver/install-common-pkgs.sh +RUN pkg-resolver/install-spotbugs.sh ubuntu:focal::arch64 +RUN pkg-resolver/install-boost.sh ubuntu:focal::arch64 +RUN pkg-resolver/install-protobuf.sh ubuntu:focal::arch64 + ### # 
Everything past this point is either not needed for testing or breaks Yetus. # So tell Yetus not to read the rest of the file: diff --git a/dev-support/docker/Dockerfile_centos_7 b/dev-support/docker/Dockerfile_centos_7 index 319942e2f6d4a..c74230e758cf6 100644 --- a/dev-support/docker/Dockerfile_centos_7 +++ b/dev-support/docker/Dockerfile_centos_7 @@ -23,79 +23,28 @@ WORKDIR /root SHELL ["/bin/bash", "-o", "pipefail", "-c"] +###### +# Platform package dependency resolver +###### +COPY pkg-resolver pkg-resolver +RUN chmod a+x pkg-resolver/*.sh pkg-resolver/*.py \ + && chmod a+r pkg-resolver/*.json + +###### +# Install packages from yum +###### +# hadolint ignore=DL3008,SC2046 RUN yum update -y \ - && yum install -y centos-release-scl \ - && yum install -y devtoolset-9 \ && yum install -y \ - ant \ - build-essential \ - bzip2 \ - bzip2-devel \ - clang \ - curl \ - cyrus-sasl-devel \ - doxygen \ - fuse \ - fuse-libs \ - fuse-devel \ - git \ - libcurl-devel \ - libtirpc-devel \ - libpmem-devel \ - libtool \ - lz4-devel \ - make \ - openssl-devel \ - pinentry-curses \ - python3 \ - python3-pip \ - python3-setuptools \ - python3-wheel \ - rsync \ - snappy-devel \ - sudo \ - valgrind \ - zlib-devel + centos-release-scl \ + python3 \ + && yum install -y $(pkg-resolver/resolve.py centos:7) # Set GCC 9 as the default C/C++ compiler RUN echo "source /opt/rh/devtoolset-9/enable" >> /etc/bashrc SHELL ["/bin/bash", "--login", "-c"] -#### -# Install Maven 3.6.3 -#### -RUN mkdir -p /opt/maven /tmp/maven \ - && curl -L -s -S https://mirrors.estointernet.in/apache/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \ - -o /tmp/maven/apache-maven-3.6.3-bin.tar.gz \ - && tar xzf /tmp/maven/apache-maven-3.6.3-bin.tar.gz --strip-components 1 -C /opt/maven - -#### -# Install CMake 3.19 -#### -# hadolint ignore=DL3003 -RUN mkdir -p /tmp/cmake /opt/cmake \ - && curl -L -s -S https://cmake.org/files/v3.19/cmake-3.19.0.tar.gz -o /tmp/cmake/cmake-3.19.0.tar.gz \ - && tar xzf /tmp/cmake/cmake-3.19.0.tar.gz --strip-components 1 -C /opt/cmake \ - && cd /opt/cmake || exit && ./bootstrap \ - && make "-j$(nproc)" \ - && make install \ - && cd /root || exit - -#### -# Install zstandard -#### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/zstd /tmp/zstd \ - && curl -L -s -S https://github.com/facebook/zstd/archive/refs/tags/v1.4.9.tar.gz -o /tmp/zstd/v1.4.9.tar.gz \ - && tar xzf /tmp/zstd/v1.4.9.tar.gz --strip-components 1 -C /opt/zstd \ - && cd /opt/zstd || exit \ - && make "-j$(nproc)" \ - && make install \ - && cd /root || exit - -RUN locale-gen en_US.UTF-8 -ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' -ENV PYTHONIOENCODING=utf-8 +# TODO: Set locale ###### # Set env vars required to build Hadoop @@ -106,68 +55,26 @@ ENV PATH "${PATH}:${MAVEN_HOME}/bin" ENV JAVA_HOME /usr/lib/jvm/java-1.8.0 ####### -# Install SpotBugs 4.2.2 +# Set env vars for SpotBugs ####### -RUN mkdir -p /opt/spotbugs \ - && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ - -o /opt/spotbugs.tgz \ - && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ - && chmod +x /opt/spotbugs/bin/* ENV SPOTBUGS_HOME /opt/spotbugs ####### -# Install Boost 1.72 (1.71 ships with Focal) +# Set env vars for Google Protobuf ####### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/boost-library \ - && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ - && mv boost_1_72_0.tar.bz2 /opt/boost-library \ - && cd 
/opt/boost-library \ - && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ - && cd /opt/boost-library/boost_1_72_0 \ - && ./bootstrap.sh --prefix=/usr/ \ - && ./b2 --without-python install \ - && cd /root \ - && rm -rf /opt/boost-library - -###### -# Install Google Protobuf 3.7.1 (3.6.1 ships with Focal) -###### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/protobuf-src \ - && curl -L -s -S \ - https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ - -o /opt/protobuf.tar.gz \ - && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ - && cd /opt/protobuf-src \ - && ./configure --prefix=/opt/protobuf \ - && make "-j$(nproc)" \ - && make install \ - && cd /root \ - && rm -rf /opt/protobuf-src ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -#### -# Install Node.js -#### -# hadolint ignore=DL3003 -RUN mkdir -p /tmp/node \ - && curl -L -s -S https://nodejs.org/dist/v14.16.1/node-v14.16.1.tar.gz -o /tmp/node-v14.16.1.tar.gz \ - && tar xzf /tmp/node-v14.16.1.tar.gz --strip-components 1 -C /tmp/node \ - && cd /tmp/node || exit \ - && ./configure \ - && make "-j$(nproc)" \ - && make install \ - && cd /root || exit -#### -# Install pylint and python-dateutil -#### -RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 - -#### -# Install bower -#### -# hadolint ignore=DL3008 -RUN npm install -g bower@1.8.8 +###### +# Install packages +###### +RUN pkg-resolver/install-maven.sh centos:7 +RUN pkg-resolver/install-cmake.sh centos:7 +RUN pkg-resolver/install-zstandard.sh centos:7 +RUN pkg-resolver/install-yasm.sh centos:7 +RUN pkg-resolver/install-protobuf.sh centos:7 +RUN pkg-resolver/install-boost.sh centos:7 +RUN pkg-resolver/install-spotbugs.sh centos:7 +RUN pkg-resolver/install-nodejs.sh centos:7 +RUN pkg-resolver/install-common-pkgs.sh diff --git a/dev-support/docker/Dockerfile_centos_8 b/dev-support/docker/Dockerfile_centos_8 index 7f1f6b4ac2239..b4109d825e47a 100644 --- a/dev-support/docker/Dockerfile_centos_8 +++ b/dev-support/docker/Dockerfile_centos_8 @@ -23,48 +23,30 @@ WORKDIR /root SHELL ["/bin/bash", "-o", "pipefail", "-c"] +###### +# Platform package dependency resolver +###### +COPY pkg-resolver pkg-resolver +RUN chmod a+x pkg-resolver/*.sh pkg-resolver/*.py \ + && chmod a+r pkg-resolver/*.json + +###### +# Install packages from yum +###### +# hadolint ignore=DL3008,SC2046 RUN yum update -y \ - && yum install -y \ - ant \ - bzip2 \ - bzip2-devel \ - clang \ - curl \ - cyrus-sasl-devel \ - dnf \ - fuse \ - git \ - libcurl-devel \ - fuse \ - fuse-libs \ - fuse-devel \ - libtool \ - libtirpc-devel \ - lz4-devel \ - make \ - openssl-devel \ - pinentry-curses \ - pkg-config \ - python3 \ - python3-pip \ - python3-setuptools \ - python3-wheel \ - rsync \ - sudo \ - valgrind \ - zlib-devel + && yum install -y python3 \ + && yum install -y $(pkg-resolver/resolve.py centos:8) #### # Install EPEL #### -RUN mkdir -p /tmp/epel \ - && curl -L -s -S https://download-ib01.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ - -o /tmp/epel/epel-release-latest-8.noarch.rpm \ - && rpm -Uvh /tmp/epel/epel-release-latest-8.noarch.rpm +RUN pkg-resolver/install-epel.sh centos:8 RUN dnf --enablerepo=powertools install -y \ doxygen \ - snappy-devel + snappy-devel \ + yasm RUN dnf install -y \ bouncycastle \ @@ -78,100 +60,34 @@ RUN dnf install -y \ RUN echo "source /opt/rh/gcc-toolset-9/enable" >> /etc/bashrc SHELL ["/bin/bash", "--login", "-c"] -#### -# Install Maven 3.6.3 -#### -RUN mkdir -p /opt/maven /tmp/maven \ - && curl -L 
-s -S https://mirrors.estointernet.in/apache/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \ - -o /tmp/maven/apache-maven-3.6.3-bin.tar.gz \ - && tar xzf /tmp/maven/apache-maven-3.6.3-bin.tar.gz --strip-components 1 -C /opt/maven - -#### -# Install CMake 3.19 -#### -# hadolint ignore=DL3003 -RUN mkdir -p /tmp/cmake /opt/cmake \ - && curl -L -s -S https://cmake.org/files/v3.19/cmake-3.19.0.tar.gz -o /tmp/cmake/cmake-3.19.0.tar.gz \ - && tar xzf /tmp/cmake/cmake-3.19.0.tar.gz --strip-components 1 -C /opt/cmake \ - && cd /opt/cmake || exit && ./bootstrap \ - && make "-j$(nproc)" \ - && make install \ - && cd /root || exit - -#### -# Install zstandard -#### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/zstd /tmp/zstd \ - && curl -L -s -S https://github.com/facebook/zstd/archive/refs/tags/v1.4.9.tar.gz -o /tmp/zstd/v1.4.9.tar.gz \ - && tar xzf /tmp/zstd/v1.4.9.tar.gz --strip-components 1 -C /opt/zstd \ - && cd /opt/zstd || exit \ - && make "-j$(nproc)" \ - && make install \ - && cd /root || exit - -RUN locale-gen en_US.UTF-8 -ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' -ENV PYTHONIOENCODING=utf-8 +# TODO: Set locale ###### # Set env vars required to build Hadoop ###### -ENV MAVEN_HOME /opt/maven/apache-maven-3.6.3 +ENV MAVEN_HOME /opt/maven ENV PATH "${PATH}:${MAVEN_HOME}/bin" # JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) ENV JAVA_HOME /usr/lib/jvm/java-1.8.0 ####### -# Install SpotBugs 4.2.2 +# Set env vars for SpotBugs ####### -RUN mkdir -p /opt/spotbugs \ - && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ - -o /opt/spotbugs.tgz \ - && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ - && chmod +x /opt/spotbugs/bin/* ENV SPOTBUGS_HOME /opt/spotbugs ####### -# Install Boost 1.72 +# Set env vars for Google Protobuf ####### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/boost-library \ - && curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download > boost_1_72_0.tar.bz2 \ - && mv boost_1_72_0.tar.bz2 /opt/boost-library \ - && cd /opt/boost-library \ - && tar --bzip2 -xf boost_1_72_0.tar.bz2 \ - && cd /opt/boost-library/boost_1_72_0 \ - && ./bootstrap.sh --prefix=/usr/ \ - && ./b2 --without-python install \ - && cd /root \ - && rm -rf /opt/boost-library - -###### -# Install Google Protobuf 3.7.1 -###### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/protobuf-src \ - && curl -L -s -S \ - https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ - -o /opt/protobuf.tar.gz \ - && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \ - && cd /opt/protobuf-src \ - && ./configure --prefix=/opt/protobuf \ - && make "-j$(nproc)" \ - && make install \ - && cd /root \ - && rm -rf /opt/protobuf-src ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -#### -# Install pylint and python-dateutil -#### -RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 - -#### -# Install bower -#### -# hadolint ignore=DL3008 -RUN npm install -g bower@1.8.8 +###### +# Install packages +###### +RUN pkg-resolver/install-maven.sh centos:8 +RUN pkg-resolver/install-cmake.sh centos:8 +RUN pkg-resolver/install-boost.sh centos:8 +RUN pkg-resolver/install-spotbugs.sh centos:8 +RUN pkg-resolver/install-protobuf.sh centos:8 +RUN pkg-resolver/install-zstandard.sh centos:8 +RUN pkg-resolver/install-common-pkgs.sh diff --git a/dev-support/docker/README.md b/dev-support/docker/README.md new file mode 100644 index 
0000000000000..32269c0929a02 --- /dev/null +++ b/dev-support/docker/README.md @@ -0,0 +1,74 @@ + + +# Docker images for building Hadoop + +This folder contains the Dockerfiles for building Hadoop on various platforms. + +# Dependency management + +The mode of installation of the dependencies needed for building Hadoop varies from one platform to +the other. Different platforms have different toolchains. Some packages tend to be polymorphic +across platforms and most commonly, a package that's readily available in one platform's toolchain +isn't available on another. We thus resort to building and installing the package from source, +causing duplication of code since this needs to be done for all the Dockerfiles pertaining to all +the platforms. We need a system to track a dependency - for a package - for a platform. Thus, +there's a lot of diversity that needs to be handled for managing package dependencies and +`pkg-resolver` caters to that. + +## Supported platforms + +`pkg-resolver/platforms.json` contains a list of the supported platforms for dependency management. + +## Package dependencies + +`pkg-resolver/packages.json` maps a dependency to a given platform. Here's the schema of this JSON. + +```json +{ + "dependency_1": { + "platform_1": "package_1", + "platform_2": [ + "package_1", + "package_2" + ] + }, + "dependency_2": { + "platform_1": [ + "package_1", + "package_2", + "package_3" + ] + } +} +``` + +The root JSON element contains unique _dependency_ children. Each of these in turn contains the names of the +_platforms_ and the list of _packages_ to be installed for that platform. Just to give an example of +how to interpret the above JSON - + +1. For `dependency_1`, `package_1` needs to be installed for `platform_1`. +2. For `dependency_1`, `package_1` and `package_2` need to be installed for `platform_2`. +3. For `dependency_2`, `package_1`, `package_2` and `package_3` need to be installed for + `platform_1`. + +## Standalone packages + +Most commonly, some packages are not available across the toolchains in various platforms. Thus, we +would need to build and install them. Since we need to do this across all the Dockerfiles for all +the platforms, it could lead to code duplication and managing them becomes a hassle. Thus, we put +the build steps in a `pkg-resolver/install-<package>.sh` and invoke this in all the Dockerfiles. \ No newline at end of file diff --git a/dev-support/docker/pkg-resolver/check_platform.py b/dev-support/docker/pkg-resolver/check_platform.py new file mode 100644 index 0000000000000..fa5529a58be20 --- /dev/null +++ b/dev-support/docker/pkg-resolver/check_platform.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
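To make the `packages.json` schema described in the README above concrete, the following is a minimal sketch of how such a mapping can be read and flattened into an install list. It is illustrative only: it is not the `pkg-resolver/resolve.py` added by this patch, and the file path and the `centos:7` platform key are assumptions made for the example.

```python
#!/usr/bin/env python3
# Minimal sketch: read a packages.json-style mapping and print the
# space-separated package list for one platform, suitable for feeding
# into "yum install -y" or "apt-get install -y".
import json


def packages_for(platform, packages_file='pkg-resolver/packages.json'):
    # Each top-level key is a dependency; its value maps a platform name
    # to either a single package name or a list of package names.
    with open(packages_file, encoding='utf-8', mode='r') as pkg_file:
        packages = json.loads(pkg_file.read())
    resolved = []
    for platforms in packages.values():
        entry = platforms.get(platform)
        if entry is None:
            continue
        resolved.extend(entry if isinstance(entry, list) else [entry])
    # De-duplicate and sort so the output is deterministic.
    return sorted(set(resolved))


if __name__ == '__main__':
    print(' '.join(packages_for('centos:7')))
```

The scripts added in this patch (`check_platform.py` here and the `install-*.sh` helpers below) cover what the sketch leaves out: validating the requested platform against `platforms.json` and building from source the packages that no platform toolchain provides.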
+ +""" +Checks whether the given platform is supported for building Apache Hadoop +""" + +import json +import sys + + +def get_platforms(): + """ + :return: A list of the supported platforms managed by pkg-resolver. + """ + + with open('pkg-resolver/platforms.json', encoding='utf-8', mode='r') as platforms_file: + return json.loads(platforms_file.read()) + + +def is_supported_platform(platform): + """ + :param platform: The name of the platform + :return: Whether the platform is supported + """ + return platform in get_platforms() + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print('ERROR: Expecting 1 argument, {} were provided'.format(len(sys.argv) - 1), + file=sys.stderr) + sys.exit(1) + + sys.exit(0 if is_supported_platform(sys.argv[1]) else 1) diff --git a/dev-support/docker/pkg-resolver/install-boost.sh b/dev-support/docker/pkg-resolver/install-boost.sh new file mode 100644 index 0000000000000..eaca09effa2c0 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-boost.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="1.72.0" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "1.72.0" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "1.72.0" ]; then + # hadolint ignore=DL3003 + mkdir -p /opt/boost-library && + curl -L https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.bz2/download >boost_1_72_0.tar.bz2 && + mv boost_1_72_0.tar.bz2 /opt/boost-library && + cd /opt/boost-library && + tar --bzip2 -xf boost_1_72_0.tar.bz2 && + cd /opt/boost-library/boost_1_72_0 && + ./bootstrap.sh --prefix=/usr/ && + ./b2 --without-python install && + cd /root && + rm -rf /opt/boost-library +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-cmake.sh b/dev-support/docker/pkg-resolver/install-cmake.sh new file mode 100644 index 0000000000000..29e2733e70196 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-cmake.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="3.19.0" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "3.19.0" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "3.19.0" ]; then + # hadolint ignore=DL3003 + mkdir -p /tmp/cmake /opt/cmake && + curl -L -s -S https://cmake.org/files/v3.19/cmake-3.19.0.tar.gz -o /tmp/cmake/cmake-3.19.0.tar.gz && + tar xzf /tmp/cmake/cmake-3.19.0.tar.gz --strip-components 1 -C /opt/cmake && + cd /opt/cmake || exit && ./bootstrap && + make "-j$(nproc)" && + make install && + cd /root || exit +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-common-pkgs.sh b/dev-support/docker/pkg-resolver/install-common-pkgs.sh new file mode 100644 index 0000000000000..d6756eea73d79 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-common-pkgs.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +###### +# Install pylint and python-dateutil +###### +pip3 install pylint==2.6.0 python-dateutil==2.8.1 + +###### +# Install bower +###### +# hadolint ignore=DL3008 +npm install -g bower@1.8.8 diff --git a/dev-support/docker/pkg-resolver/install-epel.sh b/dev-support/docker/pkg-resolver/install-epel.sh new file mode 100644 index 0000000000000..875dce3a9ae85 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-epel.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="8" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "8" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "8" ]; then + mkdir -p /tmp/epel && + curl -L -s -S https://download-ib01.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ + -o /tmp/epel/epel-release-latest-8.noarch.rpm && + rpm -Uvh /tmp/epel/epel-release-latest-8.noarch.rpm +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-hadolint.sh b/dev-support/docker/pkg-resolver/install-hadolint.sh new file mode 100644 index 0000000000000..1e2081f38c403 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-hadolint.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +curl -L -s -S \ + https://github.com/hadolint/hadolint/releases/download/v1.11.1/hadolint-Linux-x86_64 \ + -o /bin/hadolint && + chmod a+rx /bin/hadolint && + shasum -a 512 /bin/hadolint | + awk '$1!="734e37c1f6619cbbd86b9b249e69c9af8ee1ea87a2b1ff71dccda412e9dac35e63425225a95d71572091a3f0a11e9a04c2fc25d9e91b840530c26af32b9891ca" {exit(1)}' diff --git a/dev-support/docker/pkg-resolver/install-intel-isa-l.sh b/dev-support/docker/pkg-resolver/install-intel-isa-l.sh new file mode 100644 index 0000000000000..c6b4de782282e --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-intel-isa-l.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="2.29.0" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "2.29.0" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "2.29.0" ]; then + # hadolint ignore=DL3003,DL3008 + mkdir -p /opt/isa-l-src && + curl -L -s -S \ + https://github.com/intel/isa-l/archive/v2.29.0.tar.gz \ + -o /opt/isa-l.tar.gz && + tar xzf /opt/isa-l.tar.gz --strip-components 1 -C /opt/isa-l-src && + cd /opt/isa-l-src && + ./autogen.sh && + ./configure && + make "-j$(nproc)" && + make install && + cd /root && + rm -rf /opt/isa-l-src +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-maven.sh b/dev-support/docker/pkg-resolver/install-maven.sh new file mode 100644 index 0000000000000..f9ff961a190f9 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-maven.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? 
-eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="3.6.3" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "3.6.3" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "3.6.3" ]; then + mkdir -p /opt/maven /tmp/maven && + curl -L -s -S https://mirrors.estointernet.in/apache/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \ + -o /tmp/maven/apache-maven-3.6.3-bin.tar.gz && + tar xzf /tmp/maven/apache-maven-3.6.3-bin.tar.gz --strip-components 1 -C /opt/maven +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-nodejs.sh b/dev-support/docker/pkg-resolver/install-nodejs.sh new file mode 100644 index 0000000000000..5ba1c22808640 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-nodejs.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="14.16.1" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "14.16.1" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "14.16.1" ]; then + # hadolint ignore=DL3003 + mkdir -p /tmp/node && + curl -L -s -S https://nodejs.org/dist/v14.16.1/node-v14.16.1.tar.gz -o /tmp/node-v14.16.1.tar.gz && + tar xzf /tmp/node-v14.16.1.tar.gz --strip-components 1 -C /tmp/node && + cd /tmp/node || exit && + ./configure && + make "-j$(nproc)" && + make install && + cd /root || exit +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-protobuf.sh b/dev-support/docker/pkg-resolver/install-protobuf.sh new file mode 100644 index 0000000000000..7303b4048226a --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-protobuf.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="3.7.1" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "3.7.1" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "3.7.1" ]; then + # hadolint ignore=DL3003 + mkdir -p /opt/protobuf-src && + curl -L -s -S \ + https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ + -o /opt/protobuf.tar.gz && + tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src && + cd /opt/protobuf-src && + ./configure --prefix=/opt/protobuf && + make "-j$(nproc)" && + make install && + cd /root && + rm -rf /opt/protobuf-src +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-spotbugs.sh b/dev-support/docker/pkg-resolver/install-spotbugs.sh new file mode 100644 index 0000000000000..65a8f2e692418 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-spotbugs.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? 
-eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="4.2.2" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "4.2.2" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "4.2.2" ]; then + mkdir -p /opt/spotbugs && + curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz && + tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs && + chmod +x /opt/spotbugs/bin/* +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-yasm.sh b/dev-support/docker/pkg-resolver/install-yasm.sh new file mode 100644 index 0000000000000..a5f6162bc38d7 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-yasm.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="1.2.0-4" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "1.2.0-4" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "1.2.0-4" ]; then + mkdir -p /tmp/yasm && + curl -L -s -S https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/y/yasm-1.2.0-4.el7.x86_64.rpm \ + -o /tmp/yasm-1.2.0-4.el7.x86_64.rpm && + rpm -Uvh /tmp/yasm-1.2.0-4.el7.x86_64.rpm +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/install-zstandard.sh b/dev-support/docker/pkg-resolver/install-zstandard.sh new file mode 100644 index 0000000000000..3aafd469d2be3 --- /dev/null +++ b/dev-support/docker/pkg-resolver/install-zstandard.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; then + echo "ERROR: Need at least 1 argument, $# were provided" + exit 1 +fi + +pkg-resolver/check_platform.py "$1" +if [ $? -eq 1 ]; then + echo "ERROR: Unsupported platform $1" + exit 1 +fi + +default_version="1.4.9" +version_to_install=$default_version +if [ -n "$2" ]; then + version_to_install="$2" +fi + +if [ "$version_to_install" != "1.4.9" ]; then + echo "WARN: Don't know how to install version $version_to_install, installing the default version $default_version instead" + version_to_install=$default_version +fi + +if [ "$version_to_install" == "1.4.9" ]; then + # hadolint ignore=DL3003 + mkdir -p /opt/zstd /tmp/zstd && + curl -L -s -S https://github.com/facebook/zstd/archive/refs/tags/v1.4.9.tar.gz -o /tmp/zstd/v1.4.9.tar.gz && + tar xzf /tmp/zstd/v1.4.9.tar.gz --strip-components 1 -C /opt/zstd && + cd /opt/zstd || exit && + make "-j$(nproc)" && + make install && + cd /root || exit +else + echo "ERROR: Don't know how to install version $version_to_install" + exit 1 +fi diff --git a/dev-support/docker/pkg-resolver/packages.json b/dev-support/docker/pkg-resolver/packages.json new file mode 100644 index 0000000000000..4ebbc900fe4de --- /dev/null +++ b/dev-support/docker/pkg-resolver/packages.json @@ -0,0 +1,302 @@ +{ + "ant": { + "ubuntu:focal": "ant", + "ubuntu:focal::arch64": "ant", + "centos:7": "ant", + "centos:8": "ant" + }, + "apt-utils": { + "ubuntu:focal": "apt-utils", + "ubuntu:focal::arch64": "apt-utils" + }, + "automake": { + "ubuntu:focal": "automake", + "ubuntu:focal::arch64": "automake", + "centos:7": "automake", + "centos:8": "automake" + }, + "bats": { + "ubuntu:focal": "bats", + "ubuntu:focal::arch64": "bats" + }, + "build-essential": { + "ubuntu:focal": "build-essential", + "ubuntu:focal::arch64": "build-essential", + "centos:7": "build-essential" + }, + "bzip2": { + "ubuntu:focal": [ + "bzip2", + "libbz2-dev" + ], + "ubuntu:focal::arch64": [ + "bzip2", + "libbz2-dev" + ], + "centos:7": [ + "bzip2", + "bzip2-devel" + ], + "centos:8": [ + "bzip2", + "bzip2-devel" + ] + }, + "clang": { + "ubuntu:focal": "clang", + "ubuntu:focal::arch64": "clang", + "centos:7": "clang", + "centos:8": "clang" + }, + "cmake": { + "ubuntu:focal": "cmake", + "ubuntu:focal::arch64": "cmake" + }, + "curl": { + "ubuntu:focal": [ + "curl", + "libcurl4-openssl-dev" + ], + "ubuntu:focal::arch64": [ + "curl", + "libcurl4-openssl-dev" + ], + "centos:7": [ + "curl", + "libcurl-devel" + ], + "centos:8": [ + "curl", + "libcurl-devel" + ] + }, + "doxygen": { + "ubuntu:focal": "doxygen", + "ubuntu:focal::arch64": "doxygen", + "centos:7": "doxygen" + }, + "dnf": { + "centos:8": "dnf" + }, + "fuse": { + "ubuntu:focal": [ + "fuse", + "libfuse-dev" + ], + "ubuntu:focal::arch64": [ + "fuse", + "libfuse-dev" + ], + "centos:7": [ + "fuse", + "fuse-libs", + "fuse-devel" + ], + "centos:8": [ + "fuse", + "fuse-libs", + "fuse-devel" + ] + }, + "gcc": { + "ubuntu:focal": [ + "gcc", + "g++" + ], + "ubuntu:focal::arch64": [ + "gcc", + "g++" + ], + "centos:7": [ + "centos-release-scl", + "devtoolset-9" + ] + }, + "git": { + "ubuntu:focal": "git", + "ubuntu:focal::arch64": "git", + "centos:7": 
"git", + "centos:8": "git" + }, + "gnupg-agent": { + "ubuntu:focal": "gnupg-agent", + "ubuntu:focal::arch64": "gnupg-agent" + }, + "hugo": { + "ubuntu:focal": "hugo", + "ubuntu:focal::arch64": "hugo" + }, + "libbcprov-java": { + "ubuntu:focal": "libbcprov-java", + "ubuntu:focal::arch64": "libbcprov-java" + }, + "libtool": { + "ubuntu:focal": "libtool", + "ubuntu:focal::arch64": "libtool", + "centos:7": "libtool", + "centos:8": "libtool" + }, + "openssl": { + "ubuntu:focal": "libssl-dev", + "ubuntu:focal::arch64": "libssl-dev", + "centos:7": "openssl-devel", + "centos:8": "openssl-devel" + }, + "protocol-buffers": { + "ubuntu:focal": [ + "libprotobuf-dev", + "libprotoc-dev" + ], + "ubuntu:focal::arch64": [ + "libprotobuf-dev", + "libprotoc-dev" + ] + }, + "sasl": { + "ubuntu:focal": "libsasl2-dev", + "ubuntu:focal::arch64": "libsasl2-dev", + "centos:7": "cyrus-sasl-devel", + "centos:8": "cyrus-sasl-devel" + }, + "snappy": { + "ubuntu:focal": "libsnappy-dev", + "ubuntu:focal::arch64": "libsnappy-dev", + "centos:7": "snappy-devel" + }, + "zlib": { + "ubuntu:focal": [ + "libzstd-dev", + "zlib1g-dev" + ], + "ubuntu:focal::arch64": [ + "libzstd-dev", + "zlib1g-dev" + ], + "centos:7": [ + "zlib-devel", + "lz4-devel" + ], + "centos:8": [ + "zlib-devel", + "lz4-devel" + ] + }, + "locales": { + "ubuntu:focal": "locales", + "ubuntu:focal::arch64": "locales" + }, + "libtirpc-devel": { + "centos:7": "libtirpc-devel", + "centos:8": "libtirpc-devel" + }, + "libpmem": { + "centos:7": "libpmem-devel" + }, + "make": { + "ubuntu:focal": "make", + "ubuntu:focal::arch64": "make", + "centos:7": "make", + "centos:8": "make" + }, + "maven": { + "ubuntu:focal": "maven", + "ubuntu:focal::arch64": "maven" + }, + "javascript": { + "ubuntu:focal": [ + "nodejs", + "node-yarn", + "npm" + ], + "ubuntu:focal::arch64": [ + "nodejs", + "node-yarn", + "npm", + "phantomjs" + ] + }, + "java": { + "ubuntu:focal": [ + "openjdk-8-jdk", + "openjdk-11-jdk" + ], + "ubuntu:focal::arch64": [ + "openjdk-8-jdk", + "openjdk-11-jdk" + ] + }, + "pinentry-curses": { + "ubuntu:focal": "pinentry-curses", + "ubuntu:focal::arch64": "pinentry-curses", + "centos:7": "pinentry-curses", + "centos:8": "pinentry-curses" + }, + "pkg-config": { + "ubuntu:focal": "pkg-config", + "ubuntu:focal::arch64": "pkg-config", + "centos:8": "pkg-config" + }, + "python": { + "ubuntu:focal": [ + "python3", + "python3-pip", + "python3-pkg-resources", + "python3-setuptools", + "python3-wheel" + ], + "ubuntu:focal::arch64": [ + "python2.7", + "python3", + "python3-pip", + "python3-pkg-resources", + "python3-setuptools", + "python3-wheel" + ], + "centos:7": [ + "python3", + "python3-pip", + "python3-setuptools", + "python3-wheel" + ], + "centos:8": [ + "python3", + "python3-pip", + "python3-setuptools", + "python3-wheel" + ] + }, + "rsync": { + "ubuntu:focal": "rsync", + "ubuntu:focal::arch64": "rsync", + "centos:7": "rsync", + "centos:8": "rsync" + }, + "shellcheck": { + "ubuntu:focal": "shellcheck", + "ubuntu:focal::arch64": "shellcheck" + }, + "shasum": { + "centos:7": "perl-Digest-SHA", + "centos:8": "perl-Digest-SHA" + }, + "software-properties-common": { + "ubuntu:focal": "software-properties-common", + "ubuntu:focal::arch64": "software-properties-common" + }, + "sudo": { + "ubuntu:focal": "sudo", + "ubuntu:focal::arch64": "sudo", + "centos:7": "sudo", + "centos:8": "sudo" + }, + "valgrind": { + "ubuntu:focal": "valgrind", + "ubuntu:focal::arch64": "valgrind", + "centos:7": "valgrind", + "centos:8": "valgrind" + }, + "yasm": { + "ubuntu:focal": "yasm", + 
"ubuntu:focal::arch64": "yasm" + } +} \ No newline at end of file diff --git a/dev-support/docker/pkg-resolver/platforms.json b/dev-support/docker/pkg-resolver/platforms.json new file mode 100644 index 0000000000000..4861c5dd55c00 --- /dev/null +++ b/dev-support/docker/pkg-resolver/platforms.json @@ -0,0 +1,6 @@ +[ + "ubuntu:focal", + "ubuntu:focal::arch64", + "centos:7", + "centos:8" +] \ No newline at end of file diff --git a/dev-support/docker/pkg-resolver/resolve.py b/dev-support/docker/pkg-resolver/resolve.py new file mode 100644 index 0000000000000..5d62edd323e78 --- /dev/null +++ b/dev-support/docker/pkg-resolver/resolve.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Platform package dependency resolver for building Apache Hadoop. +""" + +import json +import sys +from check_platform import is_supported_platform + + +def get_packages(platform): + """ + Resolve and get the list of packages to install for the given platform. + + :param platform: The platform for which the packages needs to be resolved. + :return: A list of resolved packages to install. + """ + with open('pkg-resolver/packages.json', encoding='utf-8', mode='r') as pkg_file: + pkgs = json.loads(pkg_file.read()) + packages = [] + for platforms in filter(lambda x: x.get(platform) is not None, pkgs.values()): + if isinstance(platforms.get(platform), list): + packages.extend(platforms.get(platform)) + else: + packages.append(platforms.get(platform)) + return packages + + +if __name__ == '__main__': + if len(sys.argv) < 2: + print('ERROR: Need at least 1 argument, {} were provided'.format(len(sys.argv) - 1), + file=sys.stderr) + sys.exit(1) + + platform_arg = sys.argv[1] + if not is_supported_platform(platform_arg): + print( + 'ERROR: The given platform {} is not supported. ' + 'Please refer to platforms.json for a list of supported platforms'.format( + platform_arg), file=sys.stderr) + sys.exit(1) + + packages_to_install = get_packages(platform_arg) + print(' '.join(packages_to_install)) From 90d453863d725ca1f06e0fcbbdb3cfed00fd8dd8 Mon Sep 17 00:00:00 2001 From: litao Date: Tue, 8 Jun 2021 15:14:06 +0800 Subject: [PATCH 387/666] HDFS-16048. 
RBF: Print network topology on the router web (#3062) Reviewed-by: Inigo Goiri Reviewed-by: Hemanth Boyina Reviewed-by: Akira Ajisaka --- .../federation/router/RouterHttpServer.java | 3 + .../router/RouterNetworkTopologyServlet.java | 69 ++++++ .../src/main/webapps/router/explorer.html | 1 + .../main/webapps/router/federationhealth.html | 1 + .../TestRouterNetworkTopologyServlet.java | 210 ++++++++++++++++++ .../server/namenode/NameNodeHttpServer.java | 2 +- .../namenode/NetworkTopologyServlet.java | 9 +- 7 files changed, 290 insertions(+), 5 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java index b1fcc0c6b4c0b..85044399f9815 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java @@ -125,6 +125,9 @@ private static void setupServlets( RouterFsckServlet.PATH_SPEC, RouterFsckServlet.class, true); + httpServer.addInternalServlet(RouterNetworkTopologyServlet.SERVLET_NAME, + RouterNetworkTopologyServlet.PATH_SPEC, + RouterNetworkTopologyServlet.class); } public InetSocketAddress getHttpAddress() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java new file mode 100644 index 0000000000000..e517066c81c20 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.federation.router; + +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.namenode.NetworkTopologyServlet; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.util.StringUtils; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.PrintStream; +import java.util.Arrays; +import java.util.List; + +/** + * A servlet to print out the network topology from router. + */ +public class RouterNetworkTopologyServlet extends NetworkTopologyServlet { + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws IOException { + final ServletContext context = getServletContext(); + + String format = parseAcceptHeader(request); + if (FORMAT_TEXT.equals(format)) { + response.setContentType("text/plain; charset=UTF-8"); + } else if (FORMAT_JSON.equals(format)) { + response.setContentType("application/json; charset=UTF-8"); + } + + Router router = RouterHttpServer.getRouterFromContext(context); + DatanodeInfo[] datanodeReport = + router.getRpcServer().getDatanodeReport( + HdfsConstants.DatanodeReportType.ALL); + List datanodeInfos = Arrays.asList(datanodeReport); + + try (PrintStream out = new PrintStream( + response.getOutputStream(), false, "UTF-8")) { + printTopology(out, datanodeInfos, format); + } catch (Throwable t) { + String errMsg = "Print network topology failed. " + + StringUtils.stringifyException(t); + response.sendError(HttpServletResponse.SC_GONE, errMsg); + throw new IOException(errMsg); + } finally { + response.getOutputStream().close(); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html index 80b38e7165f78..49c3e6606accc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html @@ -48,6 +48,7 @@

[explorer.html navigation list, HTML markup lost in extraction: Metrics, Configuration, Process Thread Dump; the hunk adds a "Network Topology" entry]
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html index eca395ff4b2a9..80b4b3b39f903 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html @@ -52,6 +52,7 @@
[federationhealth.html navigation list, HTML markup lost in extraction: Metrics, Configuration, Process Thread Dump; the hunk adds a "Network Topology" entry]
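RouterNetworkTopologyServlet above hands the rendering off to the NameNode's NetworkTopologyServlet.printTopology(), feeding it the datanode report obtained through the router's RPC server, so datanodes from every subcluster appear under their racks in one view. As a rough, hypothetical sketch (not the base class's actual code; the class and variable names are invented), the report is essentially bucketed by network location before being printed as text or JSON:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

// Illustration only: group the datanodes reported by
// getDatanodeReport(DatanodeReportType.ALL) by their network location (rack),
// which is roughly the shape that printTopology() renders.
public final class TopologyGroupingSketch {
  private TopologyGroupingSketch() {
  }

  static Map<String, List<DatanodeInfo>> groupByRack(List<DatanodeInfo> report) {
    Map<String, List<DatanodeInfo>> byRack = new TreeMap<>();
    for (DatanodeInfo dn : report) {
      byRack.computeIfAbsent(dn.getNetworkLocation(), k -> new ArrayList<>())
          .add(dn);
    }
    return byRack;
  }
}

The tests that follow drive the same /topology endpoint over HTTP in both text and JSON form, against clusters with and without datanodes.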
  • diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java new file mode 100644 index 0000000000000..e120c69007ee5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver; +import org.apache.hadoop.io.IOUtils; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestRouterNetworkTopologyServlet { + + private static StateStoreDFSCluster clusterWithDatanodes; + private static StateStoreDFSCluster clusterNoDatanodes; + + @BeforeClass + public static void setUp() throws Exception { + // Builder configuration + Configuration routerConf = + new RouterConfigBuilder().stateStore().admin().quota().rpc().build(); + routerConf.set(DFS_ROUTER_HTTP_ENABLE, "true"); + Configuration hdfsConf = new Configuration(false); + + // Build and start a federated cluster + clusterWithDatanodes = new StateStoreDFSCluster(false, 2, + MultipleDestinationMountTableResolver.class); + clusterWithDatanodes.addNamenodeOverrides(hdfsConf); + clusterWithDatanodes.addRouterOverrides(routerConf); + clusterWithDatanodes.setNumDatanodesPerNameservice(9); + clusterWithDatanodes.setIndependentDNs(); + clusterWithDatanodes.setRacks( + new String[] {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2", + "/rack2", "/rack3", "/rack3", "/rack3", "/rack4", "/rack4", + "/rack4", "/rack5", "/rack5", "/rack5", "/rack6", "/rack6", + "/rack6"}); + clusterWithDatanodes.startCluster(); + clusterWithDatanodes.startRouters(); + clusterWithDatanodes.waitClusterUp(); + clusterWithDatanodes.waitActiveNamespaces(); + + // Build and start a federated cluster + clusterNoDatanodes = new StateStoreDFSCluster(false, 2, + 
MultipleDestinationMountTableResolver.class); + clusterNoDatanodes.addNamenodeOverrides(hdfsConf); + clusterNoDatanodes.addRouterOverrides(routerConf); + clusterNoDatanodes.setNumDatanodesPerNameservice(0); + clusterNoDatanodes.setIndependentDNs(); + clusterNoDatanodes.startCluster(); + clusterNoDatanodes.startRouters(); + clusterNoDatanodes.waitClusterUp(); + clusterNoDatanodes.waitActiveNamespaces(); + } + + @Test + public void testPrintTopologyTextFormat() throws Exception { + // get http Address + String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter() + .getHttpServerAddress().toString(); + + // send http request + URL url = new URL("http:/" + httpAddress + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.connect(); + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert rack info + assertTrue(topology.contains("/ns0/rack1")); + assertTrue(topology.contains("/ns0/rack2")); + assertTrue(topology.contains("/ns0/rack3")); + assertTrue(topology.contains("/ns1/rack4")); + assertTrue(topology.contains("/ns1/rack5")); + assertTrue(topology.contains("/ns1/rack6")); + + // assert node number + assertEquals(18, + topology.split("127.0.0.1").length - 1); + } + + @Test + public void testPrintTopologyJsonFormat() throws Exception { + // get http Address + String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter() + .getHttpServerAddress().toString(); + + // send http request + URL url = new URL("http:/" + httpAddress + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.setRequestProperty("Accept", "application/json"); + conn.connect(); + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + String topology = out.toString(); + + // parse json + JsonNode racks = new ObjectMapper().readTree(topology); + + // assert rack number + assertEquals(6, racks.size()); + + // assert rack info + assertTrue(topology.contains("/ns0/rack1")); + assertTrue(topology.contains("/ns0/rack2")); + assertTrue(topology.contains("/ns0/rack3")); + assertTrue(topology.contains("/ns1/rack4")); + assertTrue(topology.contains("/ns1/rack5")); + assertTrue(topology.contains("/ns1/rack6")); + + // assert node number + Iterator elements = racks.elements(); + int dataNodesCount = 0; + while(elements.hasNext()){ + JsonNode rack = elements.next(); + Iterator> fields = rack.fields(); + while (fields.hasNext()) { + dataNodesCount += fields.next().getValue().size(); + } + } + assertEquals(18, dataNodesCount); + } + + @Test + public void testPrintTopologyNoDatanodesTextFormat() throws Exception { + // get http Address + String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter() + .getHttpServerAddress().toString(); + + // send http request + URL url = new URL("http:/" + httpAddress + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new 
StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert node number + assertTrue(topology.contains("No DataNodes")); + } + + @Test + public void testPrintTopologyNoDatanodesJsonFormat() throws Exception { + // get http Address + String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter() + .getHttpServerAddress().toString(); + + // send http request + URL url = new URL("http:/" + httpAddress + "/topology"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setReadTimeout(20000); + conn.setConnectTimeout(20000); + conn.setRequestProperty("Accept", "application/json"); + conn.connect(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + IOUtils.copyBytes(conn.getInputStream(), out, 4096, true); + StringBuilder sb = + new StringBuilder("-- Network Topology -- \n"); + sb.append(out); + sb.append("\n-- Network Topology -- "); + String topology = sb.toString(); + + // assert node number + assertTrue(topology.contains("No DataNodes")); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 7ca52417d9a0a..c05398a31cec3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -253,7 +253,7 @@ private static void setupServlets(HttpServer2 httpServer) { httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME, IsNameNodeActiveServlet.PATH_SPEC, IsNameNodeActiveServlet.class); - httpServer.addInternalServlet("topology", + httpServer.addInternalServlet(NetworkTopologyServlet.SERVLET_NAME, NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java index 5d089718ccffe..c07d596d696da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java @@ -46,6 +46,7 @@ @InterfaceAudience.Private public class NetworkTopologyServlet extends DfsServlet { + public static final String SERVLET_NAME = "topology"; public static final String PATH_SPEC = "/topology"; protected static final String FORMAT_JSON = "json"; @@ -90,7 +91,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) * @param leaves leaves nodes under base scope * @param format the response format */ - public void printTopology(PrintStream stream, List leaves, + protected void printTopology(PrintStream stream, List leaves, String format) throws BadFormatException, IOException { if (leaves.isEmpty()) { stream.print("No DataNodes"); @@ -120,7 +121,7 @@ public void printTopology(PrintStream stream, List leaves, } } - private void printJsonFormat(PrintStream stream, Map> tree, ArrayList racks) throws IOException { JsonFactory dumpFactory = new JsonFactory(); JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream); @@ -152,7 +153,7 @@ private void printJsonFormat(PrintStream stream, 
Map> tree, ArrayList racks) { for(String r : racks) { stream.println("Rack: " + r); @@ -171,7 +172,7 @@ private void printTextFormat(PrintStream stream, Map Date: Tue, 8 Jun 2021 21:03:43 +0800 Subject: [PATCH 388/666] YARN-10807. Parents node labels are incorrectly added to child queues in weight mode. Contributed by Benjamin Teke. --- .../scheduler/capacity/ParentQueue.java | 17 +++++++---- .../TestCapacitySchedulerWeightMode.java | 30 +++++++++++++++++++ 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 6b6dd5afd3ed2..6f850187bfd62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -1250,16 +1250,23 @@ public void updateClusterResource(Resource clusterResource, float sumOfWeight = 0; for (CSQueue queue : childQueues) { - float weight = Math.max(0, - queue.getQueueCapacities().getWeight(nodeLabel)); - sumOfWeight += weight; + if (queue.getQueueCapacities().getExistingNodeLabels() + .contains(nodeLabel)) { + float weight = Math.max(0, + queue.getQueueCapacities().getWeight(nodeLabel)); + sumOfWeight += weight; + } } // When sum of weight == 0, skip setting normalized_weight (so // normalized weight will be 0). if (Math.abs(sumOfWeight) > 1e-6) { for (CSQueue queue : childQueues) { - queue.getQueueCapacities().setNormalizedWeight(nodeLabel, - queue.getQueueCapacities().getWeight(nodeLabel) / sumOfWeight); + if (queue.getQueueCapacities().getExistingNodeLabels() + .contains(nodeLabel)) { + queue.getQueueCapacities().setNormalizedWeight(nodeLabel, + queue.getQueueCapacities().getWeight(nodeLabel) / + sumOfWeight); + } } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java index 300e4cd097a5a..171123a6ce810 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWeightMode.java @@ -338,6 +338,36 @@ public void testGetCapacityOrWeightStringParentPctLeafWeights() } } + /** + * This test ensures that while iterating through a parent's Node Labels + * (when calculating the normalized weights) the parent's Node Labels won't + * be added to the children with weight -1. 
If the parent + * has a node label that a specific child doesn't the normalized calling the + * normalized weight setter will be skipped. The queue root.b has access to + * the labels "x" and "y", but root.b.b1 won't. For more information see + * YARN-10807. + * @throws Exception + */ + @Test + public void testChildAccessibleNodeLabelsWeightMode() throws Exception { + MockRM rm = new MockRM(getCSConfWithQueueLabelsWeightOnly(conf)); + rm.start(); + + CapacityScheduler cs = + (CapacityScheduler) rm.getRMContext().getScheduler(); + LeafQueue b1 = (LeafQueue) cs.getQueue(B1); + + Assert.assertNotNull(b1); + Assert.assertTrue(b1.getAccessibleNodeLabels().isEmpty()); + + Set b1ExistingNodeLabels = ((CSQueue) b1).getQueueCapacities() + .getExistingNodeLabels(); + Assert.assertEquals(1, b1ExistingNodeLabels.size()); + Assert.assertEquals("", b1ExistingNodeLabels.iterator().next()); + + rm.close(); + } + @Test public void testQueueInfoWeight() throws Exception { MockRM rm = new MockRM(conf); From f872381ad38be31c8e185ac9bf0a296e068fc31b Mon Sep 17 00:00:00 2001 From: Prabhu Josephraj Date: Tue, 8 Jun 2021 20:37:40 +0530 Subject: [PATCH 389/666] YARN-10792. Set Completed AppAttempt LogsLink to Log Server URL. Contributed by Abhinaba Sarkar --- .../webapp/dao/AppAttemptInfo.java | 26 +++++++++--- .../webapp/TestRMWebServicesAppAttempts.java | 40 +++++++++++++++++++ 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java index 00454afe0e5f8..499d4e1937d53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java @@ -23,7 +23,9 @@ import com.google.gson.Gson; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; @@ -31,6 +33,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; +import static org.apache.hadoop.yarn.util.StringHelper.PATH_JOINER; + @XmlRootElement(name = "appAttempt") @XmlAccessorType(XmlAccessType.FIELD) public class AppAttemptInfo { @@ -64,15 +68,29 @@ public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, this.id = attempt.getAppAttemptId().getAttemptId(); this.startTime = attempt.getStartTime(); this.finishedTime = attempt.getFinishTime(); + this.appAttemptState = attempt.getAppAttemptState(); + this.appAttemptId = attempt.getAppAttemptId().toString(); Container masterContainer = attempt.getMasterContainer(); if (masterContainer != null && hasAccess) { this.containerId = masterContainer.getId().toString(); this.nodeHttpAddress = 
masterContainer.getNodeHttpAddress(); this.nodeId = masterContainer.getNodeId().toString(); - this.logsLink = WebAppUtils.getRunningLogURL(schemePrefix - + masterContainer.getNodeHttpAddress(), - masterContainer.getId().toString(), user); + Configuration conf = rm.getRMContext().getYarnConfiguration(); + String logServerUrl = conf.get(YarnConfiguration.YARN_LOG_SERVER_URL); + if ((this.appAttemptState == RMAppAttemptState.FAILED || + this.appAttemptState == RMAppAttemptState.FINISHED || + this.appAttemptState == RMAppAttemptState.KILLED) && + logServerUrl != null) { + this.logsLink = PATH_JOINER.join(logServerUrl, + masterContainer.getNodeId().toString(), + masterContainer.getId().toString(), + masterContainer.getId().toString(), user); + } else { + this.logsLink = WebAppUtils.getRunningLogURL(schemePrefix + + masterContainer.getNodeHttpAddress(), + masterContainer.getId().toString(), user); + } Gson gson = new Gson(); this.exportPorts = gson.toJson(masterContainer.getExposedPorts()); @@ -90,8 +108,6 @@ public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, } } } - this.appAttemptId = attempt.getAppAttemptId().toString(); - this.appAttemptState = attempt.getAppAttemptState(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppAttempts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppAttempts.java index df168f757bbb5..102f13897fc79 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppAttempts.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppAttempts.java @@ -60,6 +60,7 @@ import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode; import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -124,6 +125,45 @@ public void testAppAttempts() throws Exception { rm.stop(); } + @Test (timeout = 20000) + public void testCompletedAppAttempt() throws Exception { + Configuration conf = rm.getConfig(); + String logServerUrl = "http://localhost:19888/jobhistory/logs"; + conf.set(YarnConfiguration.YARN_LOG_SERVER_URL, logServerUrl); + conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); + rm.start(); + MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 8192); + MockRMAppSubmissionData data = + MockRMAppSubmissionData.Builder.createWithMemory(CONTAINER_MB, rm) + .withAppName("testwordcount") + .withUser("user1") + .build(); + RMApp app1 = MockRMAppSubmitter.submit(rm, data); + MockAM am = MockRM.launchAndRegisterAM(app1, rm, amNodeManager); + // fail the AM by sending CONTAINER_FINISHED event without registering. 
+ amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, + ContainerState.COMPLETE); + rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED); + rm.waitForState(app1.getApplicationId(), RMAppState.FAILED); + + WebResource r = resource(); + ClientResponse response = r.path("ws").path("v1").path("cluster") + .path("apps").path(app1.getApplicationId().toString()) + .path("appattempts").accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + JSONObject json = response.getEntity(JSONObject.class); + JSONObject jsonAppAttempts = json.getJSONObject("appAttempts"); + JSONArray jsonArray = jsonAppAttempts.getJSONArray("appAttempt"); + JSONObject info = jsonArray.getJSONObject(0); + String logsLink = info.getString("logsLink"); + String containerId = app1.getCurrentAppAttempt().getMasterContainer() + .getId().toString(); + assertThat(logsLink).isEqualTo(logServerUrl + + "/127.0.0.1:1234/" + containerId + "/" + containerId + "/" + + "user1"); + rm.stop(); + } + @Test (timeout = 20000) public void testMultipleAppAttempts() throws Exception { rm.start(); From 356ce180e003a100ff86d9a93f18ba94672a2f7a Mon Sep 17 00:00:00 2001 From: Ahmed Hussein <50450311+amahussein@users.noreply.github.com> Date: Tue, 8 Jun 2021 11:09:31 -0500 Subject: [PATCH 390/666] HDFS-16042. DatanodeAdminMonitor scan should be delay based (#3058) --- .../hdfs/server/blockmanagement/DatanodeAdminManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index 8cad44f0ca2fb..70ae44a38092d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -149,7 +149,7 @@ void activate(Configuration conf) { throw new RuntimeException("Unable to create the Decommission monitor " + "from "+cls, e); } - executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs, + executor.scheduleWithFixedDelay(monitor, intervalSecs, intervalSecs, TimeUnit.SECONDS); LOG.debug("Activating DatanodeAdminManager with interval {} seconds, " + From 79cfe451957ee5bf0fc196a3d638654d056cafa7 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Tue, 8 Jun 2021 21:56:40 +0100 Subject: [PATCH 391/666] HADOOP-17631. Configuration ${env.VAR:-FALLBACK} to eval FALLBACK when restrictSystemProps=true (#2977) Contributed by Steve Loughran. 
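The change below means that when a Configuration is restricted (restrictSystemProps set), the fallback half of an ${env.VAR:-default} or ${env.VAR-default} expression is still evaluated; only the environment and system-property lookups themselves are blocked and return null. A small hedged sketch (the property names are invented; the expected values mirror the testRestrictedEnv() cases added further down):

import org.apache.hadoop.conf.Configuration;

// Illustration only: the "demo.*" key names are made up for the example.
public class RestrictedEnvFallbackExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setRestrictSystemProperties(true);
    conf.set("demo.path", "${env.PATH:-/usr/bin}"); // ":-" falls back when unset or empty
    conf.set("demo.home", "${env.HOME-/root}");     // "-" falls back only when unset
    conf.set("demo.raw", "${env.PATH}");            // no fallback: left unresolved

    System.out.println(conf.get("demo.path")); // /usr/bin   (env lookup blocked, fallback used)
    System.out.println(conf.get("demo.home")); // /root
    System.out.println(conf.get("demo.raw"));  // ${env.PATH} (stays as the raw variable)
  }
}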
--- .../org/apache/hadoop/conf/Configuration.java | 79 ++++++++++++------- .../apache/hadoop/conf/TestConfiguration.java | 56 +++++++++++++ 2 files changed, 106 insertions(+), 29 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index e0e7ac3960451..e4e36a24a6114 100755 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -1139,36 +1139,37 @@ private String substituteVars(String expr) { final String var = eval.substring(varBounds[SUB_START_IDX], varBounds[SUB_END_IDX]); String val = null; - if (!restrictSystemProps) { - try { - if (var.startsWith("env.") && 4 < var.length()) { - String v = var.substring(4); - int i = 0; - for (; i < v.length(); i++) { - char c = v.charAt(i); - if (c == ':' && i < v.length() - 1 && v.charAt(i + 1) == '-') { - val = getenv(v.substring(0, i)); - if (val == null || val.length() == 0) { - val = v.substring(i + 2); - } - break; - } else if (c == '-') { - val = getenv(v.substring(0, i)); - if (val == null) { - val = v.substring(i + 1); - } - break; + try { + // evaluate system properties or environment variables even when + // the configuration is restricted -the restrictions are enforced + // in the getenv/getProperty calls + if (var.startsWith("env.") && 4 < var.length()) { + String v = var.substring(4); + int i = 0; + for (; i < v.length(); i++) { + char c = v.charAt(i); + if (c == ':' && i < v.length() - 1 && v.charAt(i + 1) == '-') { + val = getenv(v.substring(0, i)); + if (val == null || val.length() == 0) { + val = v.substring(i + 2); } + break; + } else if (c == '-') { + val = getenv(v.substring(0, i)); + if (val == null) { + val = v.substring(i + 1); + } + break; } - if (i == v.length()) { - val = getenv(v); - } - } else { - val = getProperty(var); } - } catch (SecurityException se) { - LOG.warn("Unexpected SecurityException in Configuration", se); + if (i == v.length()) { + val = getenv(v); + } + } else { + val = getProperty(var); } + } catch (SecurityException se) { + LOG.warn("Unexpected SecurityException in Configuration", se); } if (val == null) { val = getRaw(var); @@ -1194,13 +1195,33 @@ private String substituteVars(String expr) { throw new IllegalStateException("Variable substitution depth too large: " + MAX_SUBST + " " + expr); } - + + /** + * Get the environment variable value if + * {@link #restrictSystemProps} does not block this. + * @param name environment variable name. + * @return the value or null if either it is unset or access forbidden. + */ String getenv(String name) { - return System.getenv(name); + if (!restrictSystemProps) { + return System.getenv(name); + } else { + return null; + } } + /** + * Get a system property value if + * {@link #restrictSystemProps} does not block this. + * @param key property key + * @return the value or null if either it is unset or access forbidden. 
+ */ String getProperty(String key) { - return System.getProperty(key); + if (!restrictSystemProps) { + return System.getProperty(key); + } else { + return null; + } } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 085b20442546a..3748eed12246b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -485,6 +485,62 @@ public void testEnvDefault() throws IOException { } } + /** + * Verify that when a configuration is restricted, environment + * variables and system properties will be unresolved. + * The fallback patterns for the variables are still parsed. + */ + @Test + public void testRestrictedEnv() throws IOException { + // this test relies on env.PATH being set on all platforms a + // test run will take place on, and the java.version sysprop + // set in all JVMs. + // Restricted configurations will not get access to these values, so + // will either be unresolved or, for env vars with fallbacks: the fallback + // values. + + conf.setRestrictSystemProperties(true); + + out = new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + // a simple property to reference + declareProperty("d", "D", "D"); + + // system property evaluation stops working completely + declareProperty("system1", "${java.version}", "${java.version}"); + + // the env variable does not resolve + declareProperty("secret1", "${env.PATH}", "${env.PATH}"); + + // but all the fallback options do work + declareProperty("secret2", "${env.PATH-a}", "a"); + declareProperty("secret3", "${env.PATH:-b}", "b"); + declareProperty("secret4", "${env.PATH:-}", ""); + declareProperty("secret5", "${env.PATH-}", ""); + // special case + declareProperty("secret6", "${env.PATH:}", "${env.PATH:}"); + // safety check + declareProperty("secret7", "${env.PATH:--}", "-"); + + // recursive eval of the fallback + declareProperty("secret8", "${env.PATH:-${d}}", "D"); + + // if the fallback doesn't resolve, the result is the whole variable raw. + declareProperty("secret9", "${env.PATH:-$d}}", "${env.PATH:-$d}}"); + + endConfig(); + Path fileResource = new Path(CONFIG); + conf.addResource(fileResource); + + for (Prop p : props) { + System.out.println("p=" + p.name); + String gotVal = conf.get(p.name); + String gotRawVal = conf.getRaw(p.name); + assertEq(p.val, gotRawVal); + assertEq(p.expectEval, gotVal); + } + } + @Test public void testFinalParam() throws IOException { out=new BufferedWriter(new FileWriter(CONFIG)); From f0053572b62cc8079defb5ef9c3498397ec88e53 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 9 Jun 2021 02:33:03 +0530 Subject: [PATCH 392/666] HADOOP-17725. Improve error message for token providers in ABFS (#3041) Contributed by Viraj Jasani. 
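With the getMandatoryPasswordString() helper introduced below, a missing OAuth setting now fails fast with a ConfigurationPropertyNotFoundException naming the absent key, wrapped in a TokenAccessProviderException, instead of a null value being handed to the token provider. A hedged sketch of what a caller sees; the account name is a placeholder and the key strings are assumed to match the FS_AZURE_ACCOUNT_* constants used in the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.AbfsConfiguration;

// Illustration only: "myaccount" is a placeholder account name.
public class MissingOAuthKeyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.azure.account.auth.type", "OAuth");
    conf.set("fs.azure.account.oauth.provider.type",
        "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider");
    // fs.azure.account.oauth2.client.endpoint / .client.id / .client.secret left unset

    AbfsConfiguration abfsConf = new AbfsConfiguration(conf, "myaccount");
    // Throws TokenAccessProviderException whose cause is a
    // ConfigurationPropertyNotFoundException naming the first missing key.
    abfsConf.getTokenProvider();
  }
}

The new testConfigPropNotFound() below exercises this path for each of the three client-credential keys.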
--- .../hadoop/fs/azurebfs/AbfsConfiguration.java | 50 +++++++++++++++---- .../fs/azurebfs/TestAccountConfiguration.java | 49 ++++++++++++++++-- 2 files changed, 83 insertions(+), 16 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 1c4a09be3c9a7..82a116ba83f11 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -402,6 +402,24 @@ public String getPasswordString(String key) throws IOException { return null; } + /** + * Returns a value for the key if the value exists and is not null. + * Otherwise, throws {@link ConfigurationPropertyNotFoundException} with + * key name. + * + * @param key Account-agnostic configuration key + * @return value if exists + * @throws IOException if error in fetching password or + * ConfigurationPropertyNotFoundException for missing key + */ + private String getMandatoryPasswordString(String key) throws IOException { + String value = getPasswordString(key); + if (value == null) { + throw new ConfigurationPropertyNotFoundException(key); + } + return value; + } + /** * Returns account-specific token provider class if it exists, else checks if * an account-agnostic setting is present for token provider class if AuthType @@ -742,25 +760,33 @@ public AccessTokenProvider getTokenProvider() throws TokenAccessProviderExceptio FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME, null, AccessTokenProvider.class); - AccessTokenProvider tokenProvider = null; + AccessTokenProvider tokenProvider; if (tokenProviderClass == ClientCredsTokenProvider.class) { - String authEndpoint = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT); - String clientId = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID); - String clientSecret = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET); + String authEndpoint = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT); + String clientId = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID); + String clientSecret = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET); tokenProvider = new ClientCredsTokenProvider(authEndpoint, clientId, clientSecret); LOG.trace("ClientCredsTokenProvider initialized"); } else if (tokenProviderClass == UserPasswordTokenProvider.class) { - String authEndpoint = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT); - String username = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_USER_NAME); - String password = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_USER_PASSWORD); + String authEndpoint = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT); + String username = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_USER_NAME); + String password = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_USER_PASSWORD); tokenProvider = new UserPasswordTokenProvider(authEndpoint, username, password); LOG.trace("UserPasswordTokenProvider initialized"); } else if (tokenProviderClass == MsiTokenProvider.class) { String authEndpoint = getTrimmedPasswordString( FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT, AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT); - String tenantGuid = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT); - String clientId = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID); + String tenantGuid = + 
getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT); + String clientId = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID); String authority = getTrimmedPasswordString( FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY, AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY); @@ -772,8 +798,10 @@ public AccessTokenProvider getTokenProvider() throws TokenAccessProviderExceptio String authEndpoint = getTrimmedPasswordString( FS_AZURE_ACCOUNT_OAUTH_REFRESH_TOKEN_ENDPOINT, AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_REFRESH_TOKEN_ENDPOINT); - String refreshToken = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_REFRESH_TOKEN); - String clientId = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID); + String refreshToken = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_REFRESH_TOKEN); + String clientId = + getMandatoryPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID); tokenProvider = new RefreshTokenBasedTokenProvider(authEndpoint, clientId, refreshToken); LOG.trace("RefreshTokenBasedTokenProvider initialized"); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java index 4cb0961e9364a..86bb2adbe56ed 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java @@ -19,18 +19,22 @@ package org.apache.hadoop.fs.azurebfs; import java.io.IOException; - -import org.assertj.core.api.Assertions; -import org.junit.Test; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.ConfigurationPropertyNotFoundException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TokenAccessProviderException; import org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider; import org.apache.hadoop.fs.azurebfs.oauth2.CustomTokenProviderAdapter; import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import org.assertj.core.api.Assertions; +import org.junit.Test; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT; @@ -38,6 +42,8 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; /** * Tests correct precedence of various configurations that might be returned. 
@@ -60,6 +66,12 @@ public class TestAccountConfiguration { private static final String TEST_CLIENT_ID = "clientId"; private static final String TEST_CLIENT_SECRET = "clientSecret"; + private static final List CONFIG_KEYS = + Collections.unmodifiableList(Arrays.asList( + FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT, + FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID, + FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET)); + @Test public void testStringPrecedence() throws IllegalAccessException, IOException, InvalidConfigurationValueException { @@ -361,6 +373,33 @@ public void testAccessTokenProviderPrecedence() testGlobalAndAccountOAuthPrecedence(abfsConf, null, AuthType.OAuth); } + @Test + public void testConfigPropNotFound() throws Throwable { + final String accountName = "account"; + + final Configuration conf = new Configuration(); + final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); + + for (String key : CONFIG_KEYS) { + setAuthConfig(abfsConf, true, AuthType.OAuth); + abfsConf.unset(key + "." + accountName); + testMissingConfigKey(abfsConf, key); + } + + unsetAuthConfig(abfsConf, false); + unsetAuthConfig(abfsConf, true); + } + + private static void testMissingConfigKey(final AbfsConfiguration abfsConf, + final String confKey) throws Throwable { + GenericTestUtils.assertExceptionContains("Configuration property " + + confKey + " not found.", + LambdaTestUtils.verifyCause( + ConfigurationPropertyNotFoundException.class, + LambdaTestUtils.intercept(TokenAccessProviderException.class, + () -> abfsConf.getTokenProvider().getClass().getTypeName()))); + } + public void testGlobalAndAccountOAuthPrecedence(AbfsConfiguration abfsConf, AuthType globalAuthType, AuthType accountSpecificAuthType) From 0092e71eb69a78a376fce003a15ce699d6810569 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 9 Jun 2021 08:04:45 +0530 Subject: [PATCH 393/666] HDFS-15916. Addendum. DistCp: Backward compatibility: Distcp fails from Hadoop 3 to Hadoop 2 for snapshotdiff. (#3056) --- .../java/org/apache/hadoop/hdfs/DistributedFileSystem.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index eda92be12c627..e67a4fa7d6183 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2395,7 +2395,8 @@ private SnapshotDiffReport getSnapshotDiffReportInternal( } catch (RpcNoSuchMethodException e) { // In case the server doesn't support getSnapshotDiffReportListing, // fallback to getSnapshotDiffReport. - LOG.warn("Falling back to getSnapshotDiffReport {}", e.getMessage()); + DFSClient.LOG.warn( + "Falling back to getSnapshotDiffReport {}", e.getMessage()); return dfs.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot); } startPath = report.getLastPath(); From 351d93d95cd0d0258b1723b6c5447658f74ae11f Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 9 Jun 2021 08:07:23 +0530 Subject: [PATCH 394/666] YARN-10809. 
Missing dependency causing NoClassDefFoundError in TestHBaseTimelineStorageUtils (#3081) Reviewed-by: Takanobu Asanuma --- .../hadoop-yarn-server-timelineservice-hbase-client/pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml index 6207a2d024eb2..9a88e83109526 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml @@ -110,6 +110,12 @@ hadoop-yarn-server-timelineservice + + org.mockito + mockito-core + test + + org.apache.hbase hbase-common From 5d2657eeef4114c86552cdf6ca1f64f128b58cc1 Mon Sep 17 00:00:00 2001 From: snehavarma Date: Wed, 9 Jun 2021 10:54:10 +0530 Subject: [PATCH 395/666] HADOOP-17715 ABFS: Append blob tests with non HNS accounts fail (#3028) --- .../hadoop-azure/dev-support/testrun-scripts/runtests.sh | 7 +++++++ .../src/test/resources/azure-auth-keys.xml.template | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh b/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh index d3d40621accc7..25d9593d573eb 100755 --- a/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh +++ b/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh @@ -26,6 +26,7 @@ set -eo pipefail begin ### ADD THE TEST COMBINATIONS BELOW. DO NOT EDIT THE ABOVE LINES. +### THE SCRIPT REQUIRES THE FOLLOWING UTILITIES xmlstarlet AND pcregrep. combination=HNS-OAuth @@ -34,6 +35,12 @@ properties=("fs.azure.abfs.account.name" "fs.azure.test.namespace.enabled" values=("{account name}.dfs.core.windows.net" "true" "OAuth") generateconfigs +combination=AppendBlob-HNS-OAuth +properties=("fs.azure.abfs.account.name" "fs.azure.test.namespace.enabled" +"fs.azure.account.auth.type" "fs.azure.test.appendblob.enabled") +values=("{account name}.dfs.core.windows.net" "true" "OAuth" "true") +generateconfigs + combination=HNS-SharedKey properties=("fs.azure.abfs.account.name" "fs.azure.test.namespace.enabled" "fs.azure.account.auth.type") values=("{account name}.dfs.core.windows.net" "true" "SharedKey") diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template b/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template index 2e6e2750c3b78..12dbbfab47970 100644 --- a/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template +++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template @@ -167,7 +167,8 @@ create the test FS instance. false If made true, tests will be running under the assumption that append blob is enabled and the root directory and contract test root - directory will be part of the append blob directories. + directory will be part of the append blob directories. Should be false for + non-HNS accounts. From a97cfe9c087b70efccf3f0673154af36abf2c1d2 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 9 Jun 2021 11:42:48 +0530 Subject: [PATCH 396/666] HDFS-16054. 
Replace Guava Lists usage by Hadoop's own Lists in hadoop-hdfs-project (#3073) --- .../hadoop-hdfs-client/pom.xml | 32 ++++++ .../org/apache/hadoop/hdfs/DFSClient.java | 2 +- .../hadoop/hdfs/DistributedFileSystem.java | 2 +- .../org/apache/hadoop/hdfs/XAttrHelper.java | 2 +- .../hadoop/hdfs/protocol/LocatedBlock.java | 3 +- .../hdfs/protocol/ReencryptionStatus.java | 2 +- .../protocol/datatransfer/PipelineAck.java | 2 +- .../sasl/SaslDataTransferClient.java | 2 +- .../ClientNamenodeProtocolTranslatorPB.java | 3 +- .../hdfs/protocolPB/PBHelperClient.java | 2 +- .../hadoop/hdfs/web/JsonUtilClient.java | 2 +- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 2 +- .../ha/TestRequestHedgingProxyProvider.java | 2 +- .../hdfs/web/TestURLConnectionFactory.java | 2 +- .../hadoop-hdfs-httpfs/pom.xml | 32 ++++++ .../fs/http/client/HttpFSFileSystem.java | 2 +- .../apache/hadoop/lib/wsrs/Parameters.java | 3 +- .../hadoop/lib/wsrs/ParametersProvider.java | 2 +- .../fs/http/client/BaseTestHttpFSWith.java | 3 +- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 37 +++++++ hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 32 ++++++ .../federation/router/TestRouterAdmin.java | 3 +- .../TestRouterFederationRenamePermission.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 32 ++++++ .../java/org/apache/hadoop/hdfs/DFSUtil.java | 2 +- .../java/org/apache/hadoop/hdfs/HAUtil.java | 2 +- .../sasl/SaslDataTransferServer.java | 2 +- .../qjournal/client/QuorumJournalManager.java | 2 +- .../hdfs/qjournal/server/JournalNode.java | 2 +- .../qjournal/server/JournalNodeSyncer.java | 2 +- .../DelegationTokenSecretManager.java | 2 +- .../server/aliasmap/InMemoryAliasMap.java | 2 +- .../BlockStoragePolicySuite.java | 2 +- .../PendingDataNodeMessages.java | 2 +- .../blockmanagement/SlowDiskTracker.java | 2 +- .../hadoop/hdfs/server/common/Util.java | 10 +- .../hdfs/server/datanode/BPOfferService.java | 2 +- .../server/datanode/BlockPoolManager.java | 2 +- .../datanode/BlockPoolSliceStorage.java | 2 +- .../hadoop/hdfs/server/datanode/DataNode.java | 1 - .../hdfs/server/datanode/DataStorage.java | 3 +- .../fsdataset/impl/FsDatasetImpl.java | 2 +- .../server/diskbalancer/command/Command.java | 7 +- .../diskbalancer/command/ReportCommand.java | 5 +- .../hadoop/hdfs/server/mover/Mover.java | 13 +-- .../hdfs/server/namenode/AclStorage.java | 8 +- .../server/namenode/AclTransformation.java | 10 +- .../hdfs/server/namenode/BackupImage.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 7 +- .../hdfs/server/namenode/Checkpointer.java | 11 ++- .../namenode/EncryptionZoneManager.java | 13 +-- .../hdfs/server/namenode/FSDirAttrOp.java | 3 +- .../namenode/FSDirEncryptionZoneOp.java | 4 +- .../server/namenode/FSDirErasureCodingOp.java | 5 +- .../namenode/FSDirSatisfyStoragePolicyOp.java | 3 +- .../hdfs/server/namenode/FSDirXAttrOp.java | 7 +- .../hdfs/server/namenode/FSEditLog.java | 3 +- .../hdfs/server/namenode/FSEditLogOp.java | 2 +- .../hadoop/hdfs/server/namenode/FSImage.java | 2 +- .../namenode/FSImageFormatProtobuf.java | 2 +- .../FSImageTransactionalStorageInspector.java | 7 +- .../hdfs/server/namenode/FSNamesystem.java | 3 +- .../server/namenode/FileJournalManager.java | 2 +- .../InotifyFSEditLogOpTranslator.java | 2 +- .../hdfs/server/namenode/JournalSet.java | 2 +- .../hdfs/server/namenode/LeaseManager.java | 6 +- .../hdfs/server/namenode/NNStorage.java | 2 +- .../namenode/NNStorageRetentionManager.java | 2 +- .../hadoop/hdfs/server/namenode/NameNode.java | 2 +- .../server/namenode/NameNodeRpcServer.java | 3 +- 
.../server/namenode/ReencryptionUpdater.java | 7 +- .../server/namenode/SecondaryNameNode.java | 2 +- .../hdfs/server/namenode/TransferFsImage.java | 2 +- .../namenode/XAttrPermissionFilter.java | 2 +- .../namenode/ha/StandbyCheckpointer.java | 3 +- .../DirectorySnapshottableFeature.java | 2 +- .../namenode/snapshot/SnapshotManager.java | 2 +- .../namenode/top/metrics/TopMetrics.java | 3 +- .../web/resources/NamenodeWebHdfsMethods.java | 2 +- .../offlineImageViewer/FSImageLoader.java | 14 +-- .../offlineImageViewer/PBImageTextWriter.java | 44 +++++---- .../offlineImageViewer/PBImageXmlWriter.java | 7 +- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 5 +- .../fs/viewfs/TestViewFileSystemWithAcls.java | 2 +- .../hadoop/fs/viewfs/TestViewFsWithAcls.java | 2 +- .../hadoop/hdfs/AdminStatesBaseTest.java | 2 +- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 2 +- .../apache/hadoop/hdfs/MiniDFSCluster.java | 2 +- .../apache/hadoop/hdfs/MiniDFSNNTopology.java | 2 +- .../hdfs/TestBatchedListDirectories.java | 2 +- .../hadoop/hdfs/TestBlockStoragePolicy.java | 3 +- .../apache/hadoop/hdfs/TestDFSFinalize.java | 3 +- .../apache/hadoop/hdfs/TestDFSRollback.java | 2 +- .../org/apache/hadoop/hdfs/TestDFSShell.java | 2 +- .../apache/hadoop/hdfs/TestDecommission.java | 2 +- .../hadoop/hdfs/TestEncryptionZones.java | 2 +- .../hdfs/TestErasureCodingExerciseAPIs.java | 2 +- .../apache/hadoop/hdfs/TestExtendedAcls.java | 2 +- .../hadoop/hdfs/TestMaintenanceState.java | 2 +- .../org/apache/hadoop/hdfs/TestQuota.java | 2 +- .../org/apache/hadoop/hdfs/TestSafeMode.java | 2 +- .../hdfs/TestSafeModeWithStripedFile.java | 2 +- .../TestTrashWithSecureEncryptionZones.java | 2 +- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 2 +- .../hdfs/qjournal/MiniJournalCluster.java | 3 +- .../hadoop/hdfs/qjournal/QJMTestUtil.java | 3 +- .../client/TestQuorumJournalManager.java | 2 +- .../client/TestQuorumJournalManagerUnit.java | 2 +- .../qjournal/server/TestJournalNodeSync.java | 2 +- .../blockmanagement/TestBlockManager.java | 2 +- .../TestRBWBlockInvalidation.java | 2 +- .../TestInMemoryLevelDBAliasMapClient.java | 2 +- .../server/datanode/TestBPOfferService.java | 9 +- .../datanode/TestDataNodeHotSwapVolumes.java | 5 +- .../server/datanode/TestDataNodeMetrics.java | 3 +- .../server/datanode/TestDatanodeRegister.java | 12 +-- .../fsdataset/impl/TestFsDatasetImpl.java | 8 +- .../web/webhdfs/TestDataNodeUGIProvider.java | 4 +- .../command/TestDiskBalancerCommand.java | 3 +- .../hdfs/server/namenode/FSAclBaseTest.java | 2 +- .../hdfs/server/namenode/FSImageTestUtil.java | 3 +- .../hdfs/server/namenode/FSXAttrBaseTest.java | 2 +- .../server/namenode/TestAclConfigFlag.java | 6 +- .../namenode/TestAclTransformation.java | 9 +- .../hdfs/server/namenode/TestAuditLogger.java | 2 +- .../hdfs/server/namenode/TestBackupNode.java | 4 +- .../hdfs/server/namenode/TestCheckpoint.java | 4 +- .../hdfs/server/namenode/TestEditLog.java | 7 +- .../hdfs/server/namenode/TestFSDirectory.java | 6 +- .../hdfs/server/namenode/TestFSImage.java | 4 +- .../server/namenode/TestFSImageWithAcl.java | 4 +- .../namenode/TestINodeAttributeProvider.java | 4 +- .../server/namenode/TestLeaseManager.java | 3 +- .../TestNNStorageRetentionManager.java | 5 +- .../server/namenode/ha/TestDNFencing.java | 9 +- .../ha/TestEditLogsDuringFailover.java | 3 +- .../server/namenode/ha/TestHASafeMode.java | 5 +- .../namenode/ha/TestStandbyCheckpoints.java | 38 +++---- .../ha/TestStandbyInProgressTail.java | 9 +- .../snapshot/TestAclWithSnapshot.java | 4 +- 
.../snapshot/TestFileWithSnapshotFeature.java | 7 +- .../hadoop/hdfs/tools/TestDFSAdmin.java | 34 +++---- ...wFileSystemOverloadSchemeWithDFSAdmin.java | 4 +- ...ileSystemOverloadSchemeWithFSCommands.java | 4 +- .../TestOfflineImageViewer.java | 98 +++++++++---------- .../TestOfflineImageViewerForAcl.java | 11 ++- .../apache/hadoop/hdfs/web/TestJsonUtil.java | 3 +- 147 files changed, 531 insertions(+), 354 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index d65e6030369b3..87e5b23c40361 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -178,6 +178,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.hdfs.protocol.proto + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index f568308aff8f6..3fa4dd06fec52 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -186,6 +186,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum.Type; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; import org.apache.hadoop.tracing.TraceScope; @@ -196,7 +197,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; /******************************************************** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index e67a4fa7d6183..747699d260f90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -23,7 +23,6 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.collections.list.TreeList; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -118,6 +117,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.util.ChunkedArrayList; +import 
org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java index f6e32c40e80fe..ed6dd2fa2093f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java @@ -24,10 +24,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttr.NameSpace; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java index f2d8135ab4dee..9085e87ce60e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java @@ -28,8 +28,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.security.token.Token; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; /** * Associates a block with the Datanodes that contain its replicas diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java index 5d7b91343f239..93edbd048774b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java @@ -19,11 +19,11 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus.State; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java index b58fbb8992a47..3d9e48ceebd51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java @@ -24,7 
+24,6 @@ import java.io.OutputStream; import java.util.ArrayList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -32,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.thirdparty.protobuf.TextFormat; import org.apache.hadoop.hdfs.util.LongBitFormat; +import org.apache.hadoop.util.Lists; /** Pipeline Acknowledgment **/ @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index cf6a76a5187bc..9dadd5263d62a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -61,11 +61,11 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Negotiates SASL for DataTransferProtocol on behalf of a client. There are diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 0886636419b67..7f7bdc8ef83d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -24,8 +24,6 @@ import java.util.HashMap; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -256,6 +254,7 @@ import org.apache.hadoop.thirdparty.protobuf.Message; import org.apache.hadoop.thirdparty.protobuf.ServiceException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.concurrent.AsyncGet; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index bacd45d7b38d8..9de9c373040e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -32,7 +32,6 @@ import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.primitives.Shorts; 
import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; @@ -216,6 +215,7 @@ import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.LimitInputStream; +import org.apache.hadoop.util.Lists; /** * Utilities for converting protobuf classes to and from hdfs-client side diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java index fd31da3ae5bf0..6acd062568e86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java @@ -20,7 +20,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; @@ -57,6 +56,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import java.io.ByteArrayInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index d7dd91172846a..7a0b1fb365353 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -130,6 +130,7 @@ import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.KMSUtil; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; @@ -138,7 +139,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 5365f6023aed5..346f79cee9903 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -38,6 +38,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import 
org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Before; @@ -55,7 +56,6 @@ import static org.mockito.Mockito.when; import static org.mockito.Mockito.mock; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.event.Level; public class TestRequestHedgingProxyProvider { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index eebe0baaf2356..1fe6dcad932bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -31,10 +31,10 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.LoggerFactory; public final class TestURLConnectionFactory { diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 1916ef0e3b7f6..b58a6517dd72c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -343,6 +343,38 @@ ${basedir}/dev-support/findbugsExcludeFile.xml + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 2866044badcd2..549eb704ee12f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -64,6 +64,7 @@ import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator; import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator; import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -73,7 +74,6 @@ import org.json.simple.parser.ParseException; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import java.io.BufferedInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java index c171e929ca6d6..155501b250f94 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java @@ -18,8 +18,7 @@ package org.apache.hadoop.lib.wsrs; import org.apache.hadoop.classification.InterfaceAudience; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.util.List; import java.util.Map; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java index 56a999f519cdf..7addec58c2fb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java @@ -18,7 +18,6 @@ package org.apache.hadoop.lib.wsrs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.api.core.HttpContext; import com.sun.jersey.core.spi.component.ComponentContext; import com.sun.jersey.core.spi.component.ComponentScope; @@ -26,6 +25,7 @@ import com.sun.jersey.spi.inject.Injectable; import com.sun.jersey.spi.inject.InjectableProvider; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import javax.ws.rs.core.Context; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java index a53d17fc2d2f4..84cd055c4cb08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java @@ -67,6 +67,7 @@ import org.apache.hadoop.test.TestHdfsHelper; import org.apache.hadoop.test.TestJetty; import org.apache.hadoop.test.TestJettyHelper; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Assume; import org.junit.Test; @@ -75,8 +76,6 @@ import org.eclipse.jetty.server.Server; import org.eclipse.jetty.webapp.WebAppContext; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import java.io.File; import java.io.FileOutputStream; import java.io.FileWriter; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index ab21583964b5e..8c02df5c7c515 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -170,6 +170,43 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + + + + dist diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index e17602d1f6466..f91288cd959a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -305,6 
+305,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java index abeeb73a81cd2..c2eaddc17a2a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.junit.AfterClass; import org.junit.Before; @@ -69,8 +70,6 @@ import org.mockito.Mockito; import org.mockito.internal.util.reflection.FieldSetter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * The administrator interface of the {@link Router} implemented by * {@link RouterAdminServer}. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java index cb828db1b5587..fa6b62cadd9ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java @@ -41,7 +41,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 95ac71bae22c8..a6af11b6db687 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -437,6 +437,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index cd15cd06a66c2..9f7e9d5d7c9dc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,7 +108,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.BlockingService; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 53d3b4b2936cb..58034ddad4705 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -58,7 +58,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.slf4j.LoggerFactory; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java index 0e2dc71930615..1e14837509dc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java @@ -56,11 +56,11 @@ import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Negotiates SASL for DataTransferProtocol on behalf of a server. 
There are diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 354b250bcc1f1..fd750031adbeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -61,7 +62,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.TextFormat; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 4ba880bc9c124..5d8e529303c44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -20,8 +20,8 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.VersionInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java index 8a03fcadbadea..96f7a18455428 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.qjournal.server; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; @@ -39,6 +38,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 68f3dd6d67f12..aef2306a55c15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map.Entry; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -50,7 +51,6 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.ByteString; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java index 4060b78208509..b3539f341c545 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdfs.server.aliasmap; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; @@ -38,6 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.Lists; import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBIterator; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java index 033e95a572297..90c9cc7f4e047 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java @@ -19,7 +19,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; @@ -28,6 +27,7 @@ import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java index 8a8501f4aeee5..96e5617344ca2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.Lists; /** * In the Standby Node, we can receive messages about blocks diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index 782340185c986..0a5ccc6989f7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -25,7 +25,6 @@ import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.primitives.Doubles; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -33,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports.DiskOp; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Timer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java index 14cc1c4576d85..da90b6c50dc58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java @@ -36,10 +36,6 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -50,10 +46,16 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @InterfaceAudience.Private public final class 
Util { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index ad6b2d61d2213..dcfbfbb42e69c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -19,7 +19,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; @@ -32,6 +31,7 @@ import org.apache.hadoop.hdfs.server.protocol.*; import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index ef4d292a82e87..20e5bb7a75340 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -28,11 +28,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index d2da2c5678924..e92ae075c7f35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -45,10 +45,10 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Manages storage for the set of BlockPoolSlices which share a particular diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 5a3a032a658f3..1e349759c9bc7 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -218,7 +218,6 @@ import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 03e99864a07f6..51532bb9c919d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -61,9 +61,10 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; + import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index faca370b88fa7..3a4c3b09191e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -118,13 +118,13 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.InstrumentedReadWriteLock; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Timer; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java index 6845c572ef6b2..0e4e61490d5cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java @@ -20,9 +20,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -50,9 +47,13 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.HostsFileReader; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + import java.io.Closeable; import java.io.IOException; import java.io.PrintStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java index ad5a3c2090edf..f93a909418332 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java @@ -32,9 +32,10 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume; import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet; import org.apache.hadoop.hdfs.tools.DiskBalancerCLI; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; + /** * Executes the report command. @@ -153,7 +154,7 @@ private void handleNodeReport(final CommandLine cmd, TextStringBuilder result, recordOutput(result, outputLine); - List dbdns = Lists.newArrayList(); + List dbdns; try { dbdns = getNodes(nodeVal); } catch (DiskBalancerException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index cae6b68793580..8b9e9ed742050 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -17,13 +17,7 @@ */ package org.apache.hadoop.hdfs.server.mover; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.cli.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -52,11 +46,18 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java index 806cdc6d6940b..e26f92b12ca07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java @@ -20,10 +20,6 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; @@ -35,6 +31,10 @@ import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.util.ReferenceCountMap; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * AclStorage contains utility methods that define how ACL data is stored in the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java index 031929ce67c79..83ca54e0bb21c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java @@ -28,11 +28,6 @@ import java.util.Iterator; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; @@ -41,6 +36,11 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.ScopedAclEntries; import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering; /** * AclTransformation defines the operations that can modify an ACL. 
All ACL diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index bbe607670f71a..48a3416922baf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -28,10 +28,10 @@ import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageState; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Extension of FSImage for the backup node. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 68ab12c9eb4e3..a937e0aef4b9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -86,15 +86,16 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.LightWeightGSet; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * The Cache Manager handles caching on DataNodes. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index 0557580404a7f..618a3725963a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -28,9 +28,6 @@ import java.net.URL; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.math.LongMath; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; @@ -42,8 +39,12 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.math.LongMath; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The Checkpointer is responsible for supporting periodic checkpoints @@ -304,7 +305,7 @@ static void rollForwardByApplyingLogs( FSNamesystem dstNamesystem) throws IOException { NNStorage dstStorage = dstImage.getStorage(); - List editsStreams = Lists.newArrayList(); + List editsStreams = Lists.newArrayList(); for (RemoteEditLog log : manifest.getLogs()) { if (log.getEndTxId() > dstImage.getLastAppliedTxId()) { File f = dstStorage.findFinalizedEditsFile( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 2f5fde8e94015..6ab22e6aaa28d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -29,11 +29,6 @@ import java.util.NavigableMap; import java.util.TreeMap; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; @@ -55,10 +50,16 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import static 
org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_KEY; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 5914d7449619c..a2d57cfa6537c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -37,8 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.util.EnumCounters; import org.apache.hadoop.security.AccessControlException; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java index 516d59415401c..f5b8895e9e858 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java @@ -51,11 +51,11 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Time; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.util.Time; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.util.Time.monotonicNow; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 11981b27183d6..4815dfce986ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.FileStatus; @@ -34,6 +32,9 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.erasurecode.CodecRegistry; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.ByteArrayInputStream; import 
java.io.ByteArrayOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java index 4057bbd211c0a..81b21964712f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java @@ -32,8 +32,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; /** * Helper class to perform storage policy satisfier related operations. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java index ce79321f96801..96dfdf983abcb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -17,9 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.fs.FileStatus; @@ -34,6 +31,10 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 6048457955b88..db35b8b17c726 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -109,10 +109,11 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.token.delegation.DelegationKey; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 69c95b0521e01..674676546f1b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -132,6 +132,7 @@ import org.apache.hadoop.ipc.RpcConstants; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; @@ -141,7 +142,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Helper classes for reading the ops from an InputStream. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 86b4150777edc..f7749ce7e231b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -73,11 +73,11 @@ import org.apache.hadoop.log.LogThrottlingHelper; import org.apache.hadoop.log.LogThrottlingHelper.LogAction; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * FSImage handles checkpointing and logging of the namespace edits. 
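The hunks in this part of the patch are mechanical: each file drops the shaded Guava import org.apache.hadoop.thirdparty.com.google.common.collect.Lists and picks up org.apache.hadoop.util.Lists instead, leaving the call sites themselves unchanged. A minimal sketch of what a migrated call site looks like, assuming the Hadoop utility class exposes the same newArrayList() factory that these files already invoke (the class name and values below are illustrative, not part of the patch):

import org.apache.hadoop.util.Lists;  // replaces org.apache.hadoop.thirdparty.com.google.common.collect.Lists

import java.io.File;
import java.util.List;

public class ListsMigrationExample {
  public static void main(String[] args) {
    // Factory-style construction, same shape as Guava's Lists.newArrayList();
    // only the import changes, so callers such as Checkpointer and FSEditLog compile unmodified.
    List<File> editsFiles = Lists.newArrayList();
    editsFiles.add(new File("edits_0000001-0000042"));  // illustrative value
    System.out.println("tracked edit files: " + editsFiles.size());
  }
}
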
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 183449f574ec9..58c24d4377be0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -75,8 +75,8 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java index 9f71f69902c86..4a40471971462 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java @@ -28,15 +28,16 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; class FSImageTransactionalStorageInspector extends FSImageStorageInspector { public static final Logger LOG = LoggerFactory.getLogger( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6abe0c478a079..6db856b89a20d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -335,6 +335,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.DelegationKey; +import org.apache.hadoop.util.Lists; import org.apache.log4j.Logger; import org.apache.log4j.Appender; import org.apache.log4j.AsyncAppender; @@ -344,8 +345,8 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + import org.slf4j.LoggerFactory; /** diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 9b6f82f088450..e3db4b7ade29b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -44,11 +44,11 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java index ba40da4d843b5..49c7abe27d0f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.inotify.Event; import org.apache.hadoop.hdfs.inotify.EventBatch; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; +import org.apache.hadoop.util.Lists; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index e17d7b3101b26..7a42f3aa31255 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -41,11 +41,11 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Manages a collection of Journals. 
None of the methods are synchronized, it is diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 15c940aec618f..da427dc7e51be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -35,8 +35,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -49,10 +47,12 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Time; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Time; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index f99176d449de6..092885a63897c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -54,12 +54,12 @@ import org.apache.hadoop.hdfs.util.PersistentLongFile; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.DNS; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.eclipse.jetty.util.ajax.JSON; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * NNStorage is responsible for management of the StorageDirectories used by diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java index 065cf738c6c5c..a4758bf695f94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java @@ -36,10 +36,10 @@ import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.util.MD5FileUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * The NNStorageRetentionManager is responsible for inspecting the storage diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index d19e770a98a1f..ad699a784d99a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -20,7 +20,6 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.util.Set; import org.apache.commons.logging.Log; @@ -91,6 +90,7 @@ import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.JvmPauseMonitor; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 90819c28ffc3f..580a991f343a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -45,8 +45,6 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -219,6 +217,7 @@ import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java index f2d09b0627f3d..89fd40a0d470b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java @@ -17,9 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; @@ -29,10 +26,14 @@ import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.server.namenode.ReencryptionHandler.ReencryptionBatch; 
import org.apache.hadoop.ipc.RetriableException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StopWatch; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + import java.io.IOException; import java.util.Arrays; import java.util.Iterator; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 76e6a3d85c248..2731a7dc4b051 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -29,7 +29,6 @@ import java.security.PrivilegedExceptionAction; import java.util.*; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.HelpFormatter; @@ -74,6 +73,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 7ba6d839bea1d..13337bea18ef5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -54,11 +54,11 @@ import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.apache.http.client.utils.URIBuilder; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.eclipse.jetty.io.EofException; import static org.apache.hadoop.hdfs.server.common.Util.IO_FILE_BUFFER_SIZE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 2d3adb2975324..da760e9ddd020 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -24,8 +24,8 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 1a86f8e82f7dd..7d47fecaf90b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.util.Time.monotonicNow; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.IOException; import java.net.URI; import java.net.URL; @@ -47,10 +46,12 @@ import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java index affdb1ba3ae66..9f34b5b657da3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java @@ -45,11 +45,11 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * A directory with this feature is a snapshottable directory, where snapshots diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 68bf998e0d8f7..f9ab3945a03eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -37,7 +37,6 @@ import javax.management.ObjectName; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; @@ -64,6 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; 
import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java index 3f6bb13228d55..43ce7fcff71c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode.top.metrics; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -32,7 +31,9 @@ import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.lib.Interns; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index e7bf32fcc05f0..249bac62ca79b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -112,11 +112,11 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.ResourceFilters; /** Web-hdfs NameNode implementation. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java index 0d4781d12f7ae..5641bd5030daf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java @@ -30,12 +30,6 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; -import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclEntry; @@ -58,8 +52,14 @@ import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; +import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; + +import org.apache.hadoop.util.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * FSImageLoader loads fsimage and provide methods to return JSON formatted diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java index ccab7b0c6957f..ef0b168658c68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java @@ -17,9 +17,25 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import java.io.BufferedInputStream; +import java.io.Closeable; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; +import java.io.RandomAccessFile; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + import org.apache.commons.lang3.StringUtils; import org.apache.commons.text.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; @@ -40,7 +56,9 @@ import org.apache.hadoop.hdfs.server.namenode.SerialNumberManager; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.LimitInputStream; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; + import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; import 
org.iq80.leveldb.Options; @@ -48,24 +66,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.BufferedInputStream; -import java.io.Closeable; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintStream; -import java.io.RandomAccessFile; -import java.io.UnsupportedEncodingException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * This class reads the protobuf-based fsimage and generates text output diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java index 920dfdc6dc933..8030ead8b5212 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java @@ -31,7 +31,6 @@ import java.util.Map; import java.util.TimeZone; -import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -68,10 +67,12 @@ import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.util.LimitInputStream; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.protobuf.ByteString; + import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_MASK; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_OFFSET; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_MASK; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index aa490ef940c75..307b0e8440fe2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.web; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; @@ -37,10 +36,12 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; + import 
com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.IOException; import java.util.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java index a3ccbdb847105..10b6f17ad2843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs.viewfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -28,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java index 694339dc81324..1243add66a136 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs.viewfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestHelper; @@ -29,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java index 20ec58bc75b1f..10b18032e13e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java @@ -28,7 +28,7 @@ import java.util.Map; import java.util.Random; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 7aa89596c11ad..74082a21707c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -75,11 +75,11 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import java.util.function.Supplier; -import 
org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdfs.tools.DFSck; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index fe80633b295be..f1b63acd96471 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.server.namenode.ImageServlet; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -148,7 +149,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This class creates a single-process DFS cluster for junit testing. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java index 1d7f38dae4412..31707d672c39a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; /** * This class is used to specify the setup of namenodes when instantiating diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java index 54dd33bbc919c..11bfa2fe27d54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonPathCapabilities; import org.apache.hadoop.fs.FSDataOutputStream; @@ -32,6 +31,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.hamcrest.core.StringContains; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 775f7b377aa42..54c3eda4b86b9 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -24,8 +24,6 @@ import java.io.IOException; import java.util.*; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.FSDataOutputStream; @@ -47,6 +45,7 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.junit.Assert; import static org.junit.Assert.fail; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index a9d97cfc297f4..01210d2dab786 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -35,8 +36,6 @@ import org.junit.After; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * This test ensures the appropriate response from the system when * the system is finalized. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 99aed611824b0..bcb37e340f605 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -41,7 +42,6 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This test ensures the appropriate response (successful or failure) from diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 72ac47c67968c..952ae410a50a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -35,9 +35,9 @@ import java.util.zip.GZIPOutputStream; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 4f5ab17900db8..95fbf217343a5 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -40,7 +40,6 @@ import java.util.EnumSet; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.text.TextStringBuilder; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -76,6 +75,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; import org.junit.Assert; import org.junit.Ignore; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 4404c962b81f6..0775e0426d0f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -44,7 +44,6 @@ import java.util.concurrent.Future; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; @@ -100,6 +99,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java index 622980114a0a3..e4b09a8b336c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.fs.*; @@ -33,6 +32,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java index 1b8dfa81946ba..b4baadfa041ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -27,6 +26,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java index 265d410729017..5d2365a349c9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; import org.junit.Assert; @@ -61,7 +62,6 @@ import org.slf4j.LoggerFactory; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This class tests node maintenance. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 79088d3be8555..2e36b131fb3c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -57,6 +57,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; import org.junit.Assert; @@ -65,7 +66,6 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.rules.Timeout; import org.slf4j.Logger; import org.slf4j.event.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index b741ec695d14e..3e9231f476005 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -29,6 +29,7 @@ import java.security.PrivilegedExceptionAction; import java.util.List; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -60,7 +61,6 @@ import org.junit.Test; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests to verify safe mode correctness. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index 21af9ec597261..74b2482cb2ff0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -27,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Before; import org.junit.Rule; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java index 47288847519d9..94b9c174b1fe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java @@ -32,7 +32,6 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys. DFS_DATA_TRANSFER_PROTECTION_KEY; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration; @@ -50,6 +49,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index fcd66011ee08d..e9bcef527085d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -112,12 +112,12 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.ByteString; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java index 1c5a5dd6b84a9..d0bbd44f1afbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java @@ -29,6 +29,8 @@ import java.util.concurrent.TimeoutException; import java.util.function.Supplier; + +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -41,7 +43,6 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.test.GenericTestUtils; public class MiniJournalCluster { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java index 91680578c6ff5..fa0e6ef574f7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java @@ -41,8 +41,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; public abstract class QJMTestUtil { public static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index fb1cdb5de0b61..c4760a0a6ea90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -40,6 +40,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -68,7 +69,6 @@ import org.mockito.Mockito; import org.mockito.stubbing.Stubber; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.event.Level; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java index 2e2fc57ce8899..c75f6e8e3449a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java @@ -32,6 +32,7 @@ import java.util.List; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; @@ -58,7 +59,6 @@ import org.mockito.stubbing.Stubber; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java index ffe20679e38b8..bc4cf3a6ee7b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.qjournal.server; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -36,6 +35,7 @@ .getLogFile; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 59709423f9f62..a8fd71ba49dc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -20,11 +20,11 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.util.Lists; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index 541db85f87857..f4dd3f7c88297 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -47,7 +48,6 @@ import org.junit.Test; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test when RBW block is removed. 
Invalidation of the corrupted block happens diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java index 1996f4455af5f..4d03df1f4a231 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.common.FileRegion; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Before; import org.junit.Rule; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index a3627116133e0..fc2a998acb3d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -48,11 +48,10 @@ import java.util.TreeSet; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -85,6 +84,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.junit.Before; import org.junit.Test; @@ -93,9 +93,10 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.slf4j.event.Level; public class TestBPOfferService { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index dc663c921d087..098d8a46f5762 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.server.datanode; -import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationException; 
import org.apache.hadoop.fs.BlockLocation; @@ -51,11 +49,14 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; import org.junit.After; import org.junit.Assert; import org.junit.Test; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; + import java.io.File; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index 39ea21fd20f76..602ac008fab4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -33,13 +33,12 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.List; - import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import net.jcip.annotations.NotThreadSafe; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index fc11b9ae5bb23..7c1c27b4dd4ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -30,13 +30,6 @@ import java.util.ArrayList; import java.util.Collections; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - -import org.junit.Assert; - import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -47,9 +40,14 @@ import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.VersionInfo; + +import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestDatanodeRegister { public static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index fbd9f005e3895..dbb95487853c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -18,6 +18,9 @@ package 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.concurrent.TimeoutException; import java.util.function.Supplier; @@ -25,11 +28,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner; import org.apache.hadoop.hdfs.server.datanode.LocalReplica; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Paths; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; @@ -75,6 +74,7 @@ import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.FakeTimer; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java index 198e3ccfd0535..98465dc9e98ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.net.URI; import java.util.List; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -50,12 +51,11 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestDataNodeUGIProvider { private final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java index 1344ee0c88162..1d6331d83c558 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java @@ -63,6 +63,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.junit.After; @@ -71,8 +72,6 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Tests various CLI commands of DiskBalancer. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index 3a9ad2573b841..5b1f6e4d4d6ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -49,6 +49,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -58,7 +59,6 @@ import org.junit.rules.ExpectedException; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests NameNode interaction for all ACL modification APIs. This test suite diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index a7f13eacf6c24..7f4a0ce54197d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -59,15 +59,16 @@ import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.mockito.Mockito; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.com.google.common.io.Files; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index 44e86ae66a87d..cc5133fcbc086 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryType.USER; @@ -59,7 +60,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java index 8e01f732483a4..fb467516791d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.*; import static org.apache.hadoop.fs.permission.AclEntryType.*; import static org.apache.hadoop.fs.permission.FsAction.*; -import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -29,16 +28,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.AclException; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Tests that the configuration flag that controls support for ACLs is off by * default and causes all attempted operations related to ACLs to fail. The diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java index a4df00ed06c20..91c1493454a57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java @@ -26,13 +26,14 @@ import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.junit.Test; - import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; + +import org.junit.Test; /** * Tests operations that modify ACLs. 
All tests in this suite have been diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index ee98160d8ea21..aa2c7f689766f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -39,6 +38,7 @@ import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.util.Lists; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index a6ab37e1252f1..8481b5753f8cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -28,6 +28,7 @@ import java.net.InetSocketAddress; import java.util.Collections; import java.util.List; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,14 +53,13 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.slf4j.event.Level; import org.junit.Before; import org.junit.Test; -import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestBackupNode { public static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 3e1198d3e24f0..064d5ae83957a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -44,6 +44,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.function.Supplier; import org.apache.commons.cli.ParseException; import org.slf4j.Logger; @@ -85,6 +86,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil.ExitException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.slf4j.event.Level; import org.junit.After; @@ -95,10 +97,8 @@ import org.mockito.stubbing.Answer; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import java.util.function.Supplier; import 
org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index ae27730d9ce8d..17803a078690f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -57,8 +57,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.hdfs.server.common.Storage; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FileSystem; @@ -89,6 +87,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil.ExitException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.AppenderSkeleton; @@ -104,7 +103,9 @@ import org.xml.sax.SAXException; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class tests the creation and validation of a checkpoint. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java index c159d4d61c09b..556a26dc0338a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java @@ -28,8 +28,6 @@ import java.util.Random; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; @@ -43,11 +41,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.junit.After; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index f4cd33bc9ffa6..185db6916ab0d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -35,7 
+35,6 @@ import java.util.ArrayList; import java.util.EnumSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; @@ -53,8 +52,8 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.NativeCodeLoader; -import org.junit.Assert; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -82,6 +81,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; +import org.junit.Assert; import org.junit.Assume; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java index 81251ee81cb7e..55ff19052a24d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java @@ -29,18 +29,16 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.util.Lists; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - public class TestFSImageWithAcl { private static Configuration conf; private static MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index f4c5763336b0b..512d102983596 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -45,8 +47,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - public class TestINodeAttributeProvider { private static final Logger LOG = LoggerFactory.getLogger(TestINodeAttributeProvider.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java index 61eeb362a183c..54b401d629ecc 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -37,6 +36,8 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.util.Lists; + import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index 8314a85020eed..c0f0970dfbd7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -43,16 +43,15 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger; +import org.apache.hadoop.util.Lists; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java index 0f2121663b4ae..44c3984d98a63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java @@ -28,11 +28,8 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; - import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -58,11 +55,15 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; +import org.apache.hadoop.util.Lists; + import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.slf4j.event.Level; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java index bd4cb1fa2d80b..2b42adc463660 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java @@ -40,12 +40,13 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; + import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo; import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test cases for the handling of edit logs during failover diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index 562eb198fdb8e..affa348c993df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -67,13 +67,14 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; -import org.slf4j.event.Level; +import org.apache.hadoop.util.Lists; + import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.slf4j.event.Level; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests that exercise safemode in an HA cluster. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 25b800a19614e..513f60cb1eded 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -17,14 +17,19 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.net.BindException; +import java.net.URI; +import java.net.URL; +import java.util.List; +import java.util.Random; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.hdfs.LogVerificationAppender; -import org.apache.log4j.spi.LoggingEvent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataOutputStream; @@ -32,6 +37,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.common.Util; @@ -45,23 +51,19 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ThreadUtil; +import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import java.io.File; -import java.io.IOException; -import java.io.OutputStream; -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.net.BindException; -import java.net.URI; -import java.net.URL; -import java.util.List; -import java.util.Random; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.any; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java index 4f7b993d76800..afe564d5373d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java @@ -27,9 +27,8 @@ import java.net.URI; import java.util.Iterator; import java.util.List; +import java.util.function.Supplier; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -41,16 +40,18 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; + import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo; import static org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager.QJM_RPC_MAX_TXNS_KEY; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test cases for in progress tailing edit logs by diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java index 511607e1174d0..9f911fa5a6ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -48,6 +48,8 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; + import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -55,8 +57,6 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Tests interaction of ACLs with snapshots. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java index dd1830f1cd55d..e864b91327989 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import java.util.ArrayList; + import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; @@ -28,12 +29,12 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.QuotaCounts; import org.apache.hadoop.test.Whitebox; +import org.apache.hadoop.util.Lists; + import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; -import java.util.ArrayList; - import static org.apache.hadoop.fs.StorageType.DISK; import static org.apache.hadoop.fs.StorageType.SSD; import static org.junit.Assert.assertEquals; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 736d66f2f4a78..e9e1cad25a922 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -17,6 +17,19 @@ */ package org.apache.hadoop.hdfs.tools; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Scanner; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; + import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; @@ -26,16 +39,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; -import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.commons.io.FileUtils; import org.apache.commons.text.TextStringBuilder; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.fs.ChecksumException; @@ -70,23 +78,15 @@ import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; + 
import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.Assert; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.PrintStream; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Scanner; -import java.util.concurrent.TimeoutException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION; import static org.hamcrest.CoreMatchers.allOf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java index 161d072489a81..6119348f30b57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java @@ -41,14 +41,14 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; + import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Tests DFSAdmin with ViewFileSystemOverloadScheme with configured mount links. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java index 83e49d9069dba..bc6eb50b54642 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java @@ -40,14 +40,14 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.PathUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; + import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Tests HDFS commands with ViewFileSystemOverloadScheme with configured mount * links. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 7bf3bfc1f8e84..5c91530bd9d58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -17,38 +17,15 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; -import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; -import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; -import static org.apache.hadoop.fs.permission.AclEntryType.USER; -import static org.apache.hadoop.fs.permission.FsAction.ALL; -import static org.apache.hadoop.fs.permission.FsAction.EXECUTE; -import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState; -import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_NAME; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY_CELL_SIZE; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY_NAME; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY_STATE; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_CODEC_NAME; -import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_OPTION; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import org.apache.hadoop.io.erasurecode.ECSchema; -import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.parsers.SAXParser; +import javax.xml.parsers.SAXParserFactory; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -78,20 +55,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import 
javax.xml.parsers.ParserConfigurationException; -import javax.xml.parsers.SAXParser; -import javax.xml.parsers.SAXParserFactory; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; - import org.apache.commons.io.FileUtils; import org.apache.commons.io.output.NullOutputStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ -106,27 +72,39 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; -import org.apache.hadoop.hdfs.server.namenode.FsImageProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.apache.hadoop.hdfs.server.namenode.FsImageProto; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.erasurecode.ECSchema; +import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.slf4j.event.Level; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.protobuf.ByteString; + import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -136,9 +114,29 @@ import org.xml.sax.SAXException; import org.xml.sax.helpers.DefaultHandler; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.thirdparty.protobuf.ByteString; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; +import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; +import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; +import static org.apache.hadoop.fs.permission.AclEntryType.USER; +import static org.apache.hadoop.fs.permission.FsAction.ALL; +import static org.apache.hadoop.fs.permission.FsAction.EXECUTE; +import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE; +import static 
org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_NAME; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY_CELL_SIZE; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY_NAME; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_POLICY_STATE; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_CODEC_NAME; +import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_OPTION; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; public class TestOfflineImageViewer { private static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java index b41ec9690ce30..4955846432a60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java @@ -37,8 +37,6 @@ import javax.xml.parsers.SAXParser; import javax.xml.parsers.SAXParserFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -51,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.Lists; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -58,6 +57,11 @@ import org.xml.sax.SAXException; import org.xml.sax.helpers.DefaultHandler; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; @@ -71,9 +75,6 @@ import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; import static org.junit.Assert.assertEquals; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; - /** * Tests OfflineImageViewer if the input fsimage has HDFS ACLs */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index a4107a5f05c57..839f8946be06b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -50,13 +50,14 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus.Flags; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; + import org.junit.Assert; import org.junit.Test; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestJsonUtil { From 9c0ab36fceeac59196a6aa605e7981628047f2b5 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 9 Jun 2021 11:45:47 +0530 Subject: [PATCH 397/666] YARN-10805. Replace Guava Lists usage by Hadoop's own Lists in hadoop-yarn-project (#3075) --- .../hadoop/yarn/service/webapp/ApiServer.java | 5 +-- .../service/client/TestApiServiceClient.java | 3 +- .../hadoop/yarn/service/MockServiceAM.java | 2 +- .../TestDefaultUpgradeComponentsFinder.java | 2 +- .../yarn/service/TestYarnNativeServices.java | 6 ++-- .../service/client/TestServiceClient.java | 2 +- .../instance/TestComponentInstance.java | 2 +- .../provider/TestAbstractProviderService.java | 2 +- .../yarn/service/utils/TestFilterUtils.java | 2 +- .../yarn/client/api/impl/NMClientImpl.java | 2 +- .../yarn/client/cli/NodeAttributesCLI.java | 5 +-- .../hadoop/yarn/webapp/ResponseInfo.java | 2 +- .../org/apache/hadoop/yarn/webapp/WebApp.java | 4 ++- .../hadoop/yarn/webapp/view/JQueryUI.java | 7 ++-- .../yarn/webapp/view/TwoColumnLayout.java | 7 ++-- .../yarn/api/BasePBImplRecordsTest.java | 10 +++--- ...tomResourceTypesConfigurationProvider.java | 2 +- .../resourceplugin/gpu/GpuDiscoverer.java | 8 +++-- .../nodemanager/TestContainerExecutor.java | 2 +- .../TestContainerManager.java | 3 +- .../launcher/TestContainerLaunch.java | 4 +-- .../gpu/TestGpuResourceAllocator.java | 3 +- .../com/nec/TestNECVEPlugin.java | 2 +- .../com/nec/TestVEDeviceDiscoverer.java | 3 +- .../fpga/TestIntelFpgaOpenclPlugin.java | 3 +- .../gpu/TestGpuResourcePlugin.java | 2 +- .../scheduler/SchedulerUtils.java | 12 ++++--- .../activities/ActivitiesManager.java | 5 +-- .../PlacementConstraintProcessor.java | 3 +- .../scheduler/fair/FSSchedulerNode.java | 10 +++--- .../scheduler/fair/FairScheduler.java | 14 ++++---- .../fair/FairSchedulerConfiguration.java | 7 ++-- .../server/resourcemanager/MockNodes.java | 2 +- .../resourcemanager/TestAppManager.java | 12 ++++--- .../applicationsmanager/MockAsm.java | 3 +- .../applicationsmanager/TestAMRestart.java | 6 ++-- .../placement/TestPlacementManager.java | 2 +- .../recovery/TestZKRMStateStore.java | 9 +++--- .../rmapp/TestRMAppTransitions.java | 8 +++-- .../scheduler/TestAbstractYarnScheduler.java | 3 +- .../scheduler/fair/FairSchedulerTestBase.java | 2 +- .../scheduler/fair/TestFairScheduler.java | 2 +- .../allocationfile/AllocationFileQueue.java | 2 +- .../AllocationFileQueuePlacementPolicy.java | 2 +- .../AllocationFileQueuePlacementRule.java | 2 +- ...TestFSConfigToCSConfigArgumentHandler.java | 2 +- .../TestQueuePlacementConverter.java | 3 +- .../webapp/ActivitiesTestUtils.java | 2 +- .../ResourceRequestsJsonVerifications.java | 5 +-- .../ResourceRequestsXmlVerifications.java | 5 +-- .../server/webproxy/TestProxyUriUtils.java | 3 +- hadoop-yarn-project/hadoop-yarn/pom.xml | 32 +++++++++++++++++++ 52 files changed, 155 insertions(+), 98 
deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java index c7dc50455dde0..c4c5a760e7751 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java @@ -17,14 +17,13 @@ package org.apache.hadoop.yarn.service.webapp; -import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.google.inject.Inject; import com.google.inject.Singleton; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -44,6 +43,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; + import javax.servlet.http.HttpServletRequest; import javax.ws.rs.*; import javax.ws.rs.core.Context; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java index 20b7f49a27d0f..1d08b82fff1c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java @@ -26,9 +26,8 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.eclipse.jetty.server.Server; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java index de1a4b98de4d5..848120b29d5fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.service; import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.registry.client.api.RegistryOperations; @@ -29,6 +28,7 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java index 4e773dc6f74de..012f204239bdb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.service; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.service.api.records.Component; import org.apache.hadoop.yarn.service.api.records.ConfigFile; import org.apache.hadoop.yarn.service.api.records.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java index 45318b267c38f..56aca5c89ab61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -18,14 +18,13 @@ package org.apache.hadoop.yarn.service; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; import org.apache.hadoop.registry.client.binding.RegistryUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; import org.apache.hadoop.yarn.api.records.*; 
import org.apache.hadoop.yarn.client.api.YarnClient; @@ -49,6 +48,7 @@ import org.apache.hadoop.yarn.service.exceptions.SliderException; import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + import org.hamcrest.CoreMatchers; import org.junit.After; import org.junit.Assert; @@ -59,6 +59,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; + import java.io.File; import java.io.IOException; import java.util.*; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java index af898a8f7fe86..85da12f4d0a9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service.client; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java index 488c1038e0d99..06bca6f1b0053 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.component.instance; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java index 01bec79f16de6..b56ccb5cf3777 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.service.provider; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java index 70c2b648bd8f6..59cc441818a83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto; import org.apache.hadoop.yarn.service.MockRunningServiceContext; import org.apache.hadoop.yarn.service.ServiceContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java index 7854407da6871..6b2cf46bfa945 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java @@ -27,11 +27,11 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java index 45ea443c2bb7d..90b6500a7ad7a 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.client.cli; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.MissingArgumentException; @@ -30,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; @@ -51,6 +50,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes; import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java index 0ef5b221f8977..6a52f7e5c4ed7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java @@ -18,13 +18,13 @@ package org.apache.hadoop.yarn.webapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.google.inject.servlet.RequestScoped; import java.util.Iterator; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.Lists; /** * A class to help passing around request scoped info diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java index f6511b3c9c852..54adb8208c095 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java @@ -30,12 +30,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.webapp.view.RobotsTextPage; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; + import com.google.inject.Provides; import com.google.inject.servlet.GuiceFilter; import com.google.inject.servlet.ServletModule; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java index 
5cf18df794fca..fa5a36884840c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java @@ -18,18 +18,17 @@ package org.apache.hadoop.yarn.webapp.view; +import java.util.List; + import static org.apache.commons.text.StringEscapeUtils.escapeEcmaScript; import static org.apache.hadoop.yarn.util.StringHelper.djoin; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.util.StringHelper.split; -import java.util.List; - import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.HTML; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class JQueryUI extends HtmlBlock { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java index f33474f7a0e1a..a707845cb0aa7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java @@ -18,15 +18,14 @@ package org.apache.hadoop.yarn.webapp.view; -import static org.apache.hadoop.yarn.util.StringHelper.join; - import java.util.List; +import static org.apache.hadoop.yarn.util.StringHelper.join; + import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * A simpler two column layout implementation with a header, a navigation bar * on the left, content on the right, and a footer. Works with resizable themes. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index 8014016ce5366..5697923c9974d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -17,15 +17,17 @@ */ package org.apache.hadoop.yarn.api; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.lang3.Range; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.resource.PlacementConstraint; import org.apache.hadoop.yarn.api.resource.PlacementConstraints; + +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.lang.reflect.*; import java.nio.ByteBuffer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java index 1f52f40cb504a..2b26151ccfd0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java @@ -16,8 +16,8 @@ package org.apache.hadoop.yarn.util.resource; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.LocalConfigurationProvider; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java index 8d291569c5f12..b393cfcdeac40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java @@ -20,19 +20,21 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourcesExceptionUtil.throwIfNecessary; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformation; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformationParser; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuDeviceInformation; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java index 59a5a2d29f83e..ab142a1db0fa6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java @@ -30,10 +30,10 @@ import java.util.Timer; import java.util.TimerTask; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index b2f183c368ae5..7a65a799080f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -18,13 +18,14 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesResponse; import org.apache.hadoop.yarn.api.records.LocalizationState; import org.apache.hadoop.yarn.api.records.LocalizationStatus; import org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; + import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import 
static org.apache.hadoop.test.MetricsAsserts.assertGauge; import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index a9bcef77c3963..365e2bb6ad946 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -51,11 +51,10 @@ import java.util.Map; import java.util.Set; import java.util.StringTokenizer; +import java.util.function.Supplier; import java.util.jar.JarFile; import java.util.jar.Manifest; -import java.util.function.Supplier; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -67,6 +66,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java index 26726d9f54998..ba8a9309d02e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java @@ -38,6 +38,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -58,8 +59,6 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Unit tests for GpuResourceAllocator. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java index 58fc4c7dc1c47..86ef9058f26fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestNECVEPlugin.java @@ -46,7 +46,7 @@ import org.apache.commons.compress.utils.Sets; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Shell.CommandExecutor; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java index 1956a4ffde860..a423aa5f0bded 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java @@ -36,6 +36,7 @@ import java.util.function.Function; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Shell.CommandExecutor; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; import org.junit.After; @@ -47,8 +48,6 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Unit tests for VEDeviceDiscoverer class. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java index 0b093953eef34..7c81a062e0951 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/TestIntelFpgaOpenclPlugin.java @@ -22,8 +22,9 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.util.Lists; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java index da1a57ec005be..76eef632fed85 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java @@ -22,7 +22,7 @@ import static org.mockito.Mockito.when; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformation; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.NMGpuResourceInfo; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index abb274e50990a..aadcb55c42943 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -23,13 +23,9 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import 
org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -63,6 +59,12 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import static org.apache.hadoop.yarn.exceptions .InvalidResourceRequestException .GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index 28bfb7662b919..b22e3ca807356 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -18,10 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -44,6 +43,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; import org.apache.hadoop.yarn.util.SystemClock; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.ConcurrentLinkedQueue; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java index b4d10f85e62eb..2387431e4a6fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext; import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor; import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils; @@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingResponse; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java index 65d258ab2aa8e..1fe8edc73398f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java @@ -18,12 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -35,6 +32,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.util.resource.Resources; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 3790c48ba08d2..d2d5e045f3241 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -18,17 +18,12 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -102,6 +97,13 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java index 80fb14478ad7c..2a74d56d925dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java @@ -25,12 +25,10 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -41,6 +39,9 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Private @Evolving public class FairSchedulerConfiguration extends Configuration { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 0de6c572a2c22..c951ba2c641df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -25,6 +25,7 @@ import java.util.Map; import org.apache.hadoop.net.Node; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -43,7 +44,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test helper to generate mock nodes diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 7919650c7b60e..f916264a59f33 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -18,16 +18,12 @@ package org.apache.hadoop.yarn.server.resourcemanager; - -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; @@ -81,6 +77,12 @@ import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; + +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.junit.After; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index f8f0d34b81d82..4640952016d7f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ipc.CallerContext; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -56,8 +57,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.util.Records; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - @InterfaceAudience.Private public abstract class MockAsm extends MockApps { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index b2af776aa568f..82c534e6aad79 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -26,10 +26,9 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -70,6 +69,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; import org.apache.hadoop.yarn.util.ControlledClock; import org.apache.hadoop.yarn.util.Records; + +import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; + import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java index 720024779db19..e27c4ec532171 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement; -import 
org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 7ffaba5eb61a3..c8aba283268c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -25,8 +25,6 @@ import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.metrics2.impl.MetricsRecords; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol; @@ -37,6 +35,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.*; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl; @@ -75,9 +74,10 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; @@ -97,6 +97,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Supplier; import javax.crypto.SecretKey; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 92011808b7ec8..8d4bb8e54554a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -18,9 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; @@ -32,6 +29,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -88,6 +86,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -95,6 +94,9 @@ import org.junit.runners.Parameterized; import org.mockito.ArgumentCaptor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index 990b7a3c464ec..76fbf0936f1ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -33,10 +33,10 @@ import java.util.Set; import java.util.stream.Collectors; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -86,6 +86,7 @@ import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.resource.Resources; + import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java index 274152f47afbf..5508f0f2bde85 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index c8a2225f5669a..2b768bebe92c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol; @@ -26,6 +25,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.GroupMappingServiceProvider; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java index c37e9a840d286..ade1a8b0a42e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java index fa878c7715507..e41582747a165 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java index 2ebb56433a7b9..b1aeed3dc5de0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java index 2fdd90ca073cd..cb8cc587f68f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java @@ -31,7 +31,7 @@ import java.util.List; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.junit.After; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java index a1a19f070212e..6599080aab59c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java @@ -27,6 +27,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.placement.ApplicationPlacementContext; @@ -51,8 +52,6 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; - /** * Unit tests for QueuePlacementConverter. * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java index cbd1d76f89281..dd08e4b129750 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java @@ -18,10 +18,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; import org.apache.hadoop.http.JettyUtils; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.Priority; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java index 393a31ea179f7..00566af9cd753 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java @@ -18,8 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import 
org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -27,6 +26,8 @@ import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + import java.util.List; import java.util.Map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java index 40e626098f1dc..37d9feb2b3f8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java @@ -18,8 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -27,6 +26,8 @@ import org.w3c.dom.Element; import org.w3c.dom.NodeList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; + import java.util.List; import java.util.Map; import java.util.Set; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java index e4829d1f13c4f..20e5cdb90074e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java @@ -24,12 +24,13 @@ import java.net.URISyntaxException; import java.util.List; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.TrackingUriPlugin; + import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestProxyUriUtils { @Test diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index e97e35608553a..66793271befa0 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -78,6 +78,38 @@ org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto:org.apache.hadoop.yarn.service + + org.apache.maven.plugins + maven-enforcer-plugin + + + 
de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided Lists rather than Guava provided Lists + + org.apache.hadoop.thirdparty.com.google.common.collect.Lists + com.google.common.collect.Lists + + + + + + + From 5e0e67f8c90d7129d4acf4295474e90dd413039f Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Wed, 9 Jun 2021 15:32:07 +0900 Subject: [PATCH 398/666] HADOOP-17750. Fix asf license errors in newly added files by HADOOP-17727. (#3083) Reviewed-by: Viraj Jasani Reviewed-by: Masatake Iwasaki Reviewed-by: Akira Ajisaka Reviewed-by: Gautham B A --- pom.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pom.xml b/pom.xml index 439ae42f293c9..3ed9182184d7b 100644 --- a/pom.xml +++ b/pom.xml @@ -456,6 +456,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x **/*.js licenses/** licenses-binary/** + dev-support/docker/pkg-resolver/packages.json + dev-support/docker/pkg-resolver/platforms.json From 9e0615bb725b0e9bff0930d33e8224711b654504 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Thu, 10 Jun 2021 14:29:47 +0900 Subject: [PATCH 399/666] YARN-10803. [JDK 11] TestRMFailoverProxyProvider and TestNoHaRMFailoverProxyProvider fails by ClassCastException. (#3068) Reviewed-by: Takanobu Asanuma --- .../TestNoHaRMFailoverProxyProvider.java | 20 +++++++-------- .../client/TestRMFailoverProxyProvider.java | 25 +++++++++---------- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestNoHaRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestNoHaRMFailoverProxyProvider.java index e7223b73eacc2..fb9c65b6c2bb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestNoHaRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestNoHaRMFailoverProxyProvider.java @@ -140,9 +140,9 @@ public void testDefaultFPPGetOneProxy() throws Exception { // Create a proxy and mock a RMProxy Proxy mockProxy1 = new TestProxy((proxy, method, args) -> null); Class protocol = ApplicationClientProtocol.class; - RMProxy mockRMProxy = mock(RMProxy.class); - DefaultNoHARMFailoverProxyProvider fpp = - new DefaultNoHARMFailoverProxyProvider(); + RMProxy mockRMProxy = mock(RMProxy.class); + DefaultNoHARMFailoverProxyProvider fpp = + new DefaultNoHARMFailoverProxyProvider<>(); InetSocketAddress mockAdd1 = new InetSocketAddress(RM1_PORT); @@ -154,7 +154,7 @@ public void testDefaultFPPGetOneProxy() throws Exception { // Initialize failover proxy provider and get proxy from it. 
fpp.init(conf, mockRMProxy, protocol); - FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); assertEquals( "AutoRefreshRMFailoverProxyProvider doesn't generate " + "expected proxy", @@ -180,7 +180,7 @@ public void testDefaultFPPGetOneProxy() throws Exception { // Perform Failover and get proxy again from failover proxy provider fpp.performFailover(actualProxy1.proxy); - FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); assertEquals("AutoRefreshRMFailoverProxyProvider " + "doesn't generate expected proxy after failover", mockProxy1, actualProxy2.proxy); @@ -208,9 +208,9 @@ public void testAutoRefreshIPChange() throws Exception { Proxy mockProxy1 = new TestProxy((proxy, method, args) -> null); Proxy mockProxy2 = new TestProxy((proxy, method, args) -> null); Class protocol = ApplicationClientProtocol.class; - RMProxy mockRMProxy = mock(RMProxy.class); - AutoRefreshNoHARMFailoverProxyProvider fpp = - new AutoRefreshNoHARMFailoverProxyProvider(); + RMProxy mockRMProxy = mock(RMProxy.class); + AutoRefreshNoHARMFailoverProxyProvider fpp = + new AutoRefreshNoHARMFailoverProxyProvider<>(); // generate two address with different ports. InetSocketAddress mockAdd1 = new InetSocketAddress(RM1_PORT); @@ -224,7 +224,7 @@ public void testAutoRefreshIPChange() throws Exception { // Initialize proxy provider and get proxy from it. fpp.init(conf, mockRMProxy, protocol); - FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); assertEquals( "AutoRefreshRMFailoverProxyProvider doesn't generate " + "expected proxy", @@ -259,7 +259,7 @@ public void testAutoRefreshIPChange() throws Exception { // Perform Failover and get proxy again from failover proxy provider fpp.performFailover(actualProxy1.proxy); - FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); assertEquals("AutoRefreshNoHARMFailoverProxyProvider " + "doesn't generate expected proxy after failover", mockProxy2, actualProxy2.proxy); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailoverProxyProvider.java index bda72d8b92e3b..ce9af23744fec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailoverProxyProvider.java @@ -87,9 +87,9 @@ public void testFailoverChange() throws Exception { Proxy mockProxy1 = new TestProxy((proxy, method, args) -> null); Class protocol = ApplicationClientProtocol.class; - RMProxy mockRMProxy = mock(RMProxy.class); - ConfiguredRMFailoverProxyProvider fpp = - new ConfiguredRMFailoverProxyProvider(); + RMProxy mockRMProxy = mock(RMProxy.class); + ConfiguredRMFailoverProxyProvider fpp = + new ConfiguredRMFailoverProxyProvider<>(); // generate two address with different ports. // Default port of yarn RM @@ -104,7 +104,7 @@ public void testFailoverChange() throws Exception { // Initialize failover proxy provider and get proxy from it. 
fpp.init(conf, mockRMProxy, protocol); - FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); assertEquals( "ConfiguredRMFailoverProxyProvider doesn't generate " + "expected proxy", @@ -139,7 +139,7 @@ public void testFailoverChange() throws Exception { // Perform Failover and get proxy again from failover proxy provider fpp.performFailover(actualProxy1.proxy); - FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); assertEquals("ConfiguredRMFailoverProxyProvider " + "doesn't generate expected proxy after failover", mockProxy2, actualProxy2.proxy); @@ -167,7 +167,7 @@ public void testFailoverChange() throws Exception { // Perform Failover and get proxy again from failover proxy provider fpp.performFailover(actualProxy2.proxy); - FailoverProxyProvider.ProxyInfo actualProxy3 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy3 = fpp.getProxy(); // check the proxy is the same as the one we created before. assertEquals("ConfiguredRMFailoverProxyProvider " + @@ -202,11 +202,10 @@ public void testAutoRefreshFailoverChange() throws Exception { // Create three proxies and mock a RMProxy Proxy mockProxy1 = new TestProxy((proxy, method, args) -> null); Proxy mockProxy2 = new TestProxy((proxy, method, args) -> null); - Proxy mockProxy3 = new TestProxy((proxy, method, args) -> null); Class protocol = ApplicationClientProtocol.class; - RMProxy mockRMProxy = mock(RMProxy.class); - AutoRefreshRMFailoverProxyProvider fpp = - new AutoRefreshRMFailoverProxyProvider(); + RMProxy mockRMProxy = mock(RMProxy.class); + AutoRefreshRMFailoverProxyProvider fpp = + new AutoRefreshRMFailoverProxyProvider<>(); // generate three address with different ports. InetSocketAddress mockAdd1 = new InetSocketAddress(RM1_PORT); @@ -222,7 +221,7 @@ public void testAutoRefreshFailoverChange() throws Exception { // Initialize failover proxy provider and get proxy from it. fpp.init(conf, mockRMProxy, protocol); - FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy1 = fpp.getProxy(); assertEquals( "AutoRefreshRMFailoverProxyProvider doesn't generate " + "expected proxy", @@ -257,7 +256,7 @@ public void testAutoRefreshFailoverChange() throws Exception { // Perform Failover and get proxy again from failover proxy provider fpp.performFailover(actualProxy1.proxy); - FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy2 = fpp.getProxy(); assertEquals("AutoRefreshRMFailoverProxyProvider " + "doesn't generate expected proxy after failover", mockProxy2, actualProxy2.proxy); @@ -285,7 +284,7 @@ public void testAutoRefreshFailoverChange() throws Exception { // Perform Failover and get proxy again from failover proxy provider fpp.performFailover(actualProxy2.proxy); - FailoverProxyProvider.ProxyInfo actualProxy3 = fpp.getProxy(); + FailoverProxyProvider.ProxyInfo actualProxy3 = fpp.getProxy(); // check the proxy is the same as the one we created before. assertEquals("ConfiguredRMFailoverProxyProvider " + From 0499070bc6046362060c4ff7da93f0b3739b9913 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 11 Jun 2021 08:45:52 +0530 Subject: [PATCH 400/666] HADOOP-17753. 
Keep restrict-imports-enforcer-rule for Guava Lists in top level hadoop-main pom (#3087) --- .../hadoop-cloud-storage/pom.xml | 37 ------------------- .../hadoop-cos/pom.xml | 32 ---------------- .../hadoop-huaweicloud/pom.xml | 32 ---------------- hadoop-common-project/hadoop-common/pom.xml | 32 ---------------- hadoop-common-project/hadoop-registry/pom.xml | 32 ---------------- .../hadoop-hdfs-client/pom.xml | 32 ---------------- .../hadoop-hdfs-httpfs/pom.xml | 32 ---------------- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 37 ------------------- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 32 ---------------- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 32 ---------------- .../hadoop-mapreduce-client/pom.xml | 32 ---------------- .../hadoop-mapreduce-examples/pom.xml | 32 ---------------- .../plugin/resourcegz/ResourceGzMojo.java | 4 +- hadoop-tools/hadoop-aws/pom.xml | 32 ---------------- hadoop-tools/hadoop-azure/pom.xml | 37 +------------------ hadoop-tools/hadoop-distcp/pom.xml | 32 ---------------- .../hadoop-dynamometer-infra/pom.xml | 32 ---------------- .../hadoop-dynamometer-workload/pom.xml | 32 ---------------- hadoop-tools/hadoop-kafka/pom.xml | 32 ---------------- hadoop-yarn-project/hadoop-yarn/pom.xml | 32 ---------------- pom.xml | 8 ++++ 21 files changed, 12 insertions(+), 623 deletions(-) diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml index 699ce1abfc692..a8f45a7f3a222 100644 --- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml @@ -32,43 +32,6 @@ cloud-storage - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - - - - org.apache.hadoop diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml index b1f9ccb6e3e04..fa47e354c7998 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml @@ -96,38 +96,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml index 9386152c5dd7d..43360c11cd9d2 100755 --- a/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/pom.xml @@ -92,38 +92,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git 
a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index c234daa46d001..249471cc8568d 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -662,38 +662,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index 8e8b1e064e7e7..d9102d1d9d71a 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -260,38 +260,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index 87e5b23c40361..d65e6030369b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -178,38 +178,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.hdfs.protocol.proto - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index b58a6517dd72c..1916ef0e3b7f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -343,38 +343,6 @@ ${basedir}/dev-support/findbugsExcludeFile.xml - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 8c02df5c7c515..ab21583964b5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -170,43 +170,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - 
com.google.common.collect.Lists - - - - - - - - - - dist diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index f91288cd959a2..e17602d1f6466 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -305,38 +305,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index a6af11b6db687..95ac71bae22c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -437,38 +437,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 112a2c73e8ba8..55940ae4ff744 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -177,38 +177,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index c4f89acb41f4f..48cf27efe437f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -155,38 +155,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java index 5d99d28aa802d..0b0139eebedb1 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java @@ -13,7 +13,6 @@ */ package org.apache.hadoop.maven.plugin.resourcegz; -import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; 
import org.apache.commons.io.IOUtils; import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; @@ -27,6 +26,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Arrays; import java.util.List; import java.util.function.Consumer; import java.util.regex.Matcher; @@ -65,7 +65,7 @@ public void execute() throws MojoExecutionException, MojoFailureException { try { Path inputDir = new File(inputDirectory).toPath(); File outputDir = new File(outputDirectory); - List exts = Lists.newArrayList(extensions.split(",")); + List exts = Arrays.asList(extensions.split(",")); exts.replaceAll(String::trim); GZConsumer cons = new GZConsumer(inputDir.toFile(), outputDir); Files.walk(inputDir).filter(path -> { diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 414e69780ea6c..0cab5ada2169d 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -467,38 +467,6 @@ - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 59aae51e60788..cc773ab777fc3 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -99,44 +99,11 @@ - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use hadoop-common provided Lists rather than Guava provided Lists - - org.apache.hadoop.thirdparty.com.google.common.collect.Lists - com.google.common.collect.Lists - - - - - - - + - + +# Apache Hadoop Changelog + +## Release 3.3.1 - 2021-06-13 + + + +### IMPORTANT ISSUES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-17338](https://issues.apache.org/jira/browse/HADOOP-17338) | Intermittent S3AInputStream failures: Premature end of Content-Length delimited message body etc | Major | fs/s3 | Yongjun Zhang | Yongjun Zhang | +| [HDFS-15380](https://issues.apache.org/jira/browse/HDFS-15380) | RBF: Could not fetch real remote IP in RouterWebHdfsMethods | Major | webhdfs | tomscut | tomscut | + + +### NEW FEATURES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-16916](https://issues.apache.org/jira/browse/HADOOP-16916) | ABFS: Delegation SAS generator for integration with Ranger | Minor | fs/azure | Thomas Marqardt | Thomas Marqardt | +| [HDFS-13183](https://issues.apache.org/jira/browse/HDFS-13183) | Standby NameNode process getBlocks request to reduce Active load | Major | balancer & mover, namenode | Xiaoqiao He | Xiaoqiao He | +| [HADOOP-17076](https://issues.apache.org/jira/browse/HADOOP-17076) | ABFS: Delegation SAS Generator Updates | Minor | fs/azure | Thomas Marqardt | Thomas Marqardt | +| [HADOOP-15891](https://issues.apache.org/jira/browse/HADOOP-15891) | Provide Regex Based Mount Point In Inode Tree | Major | viewfs | zhenzhao wang | zhenzhao wang | +| 
[HADOOP-17125](https://issues.apache.org/jira/browse/HADOOP-17125) | Using snappy-java in SnappyCodec | Major | common | DB Tsai | L. C. Hsieh | +| [HADOOP-17292](https://issues.apache.org/jira/browse/HADOOP-17292) | Using lz4-java in Lz4Codec | Major | common | L. C. Hsieh | L. C. Hsieh | +| [HDFS-15711](https://issues.apache.org/jira/browse/HDFS-15711) | Add Metrics to HttpFS Server | Major | httpfs | Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7315](https://issues.apache.org/jira/browse/MAPREDUCE-7315) | LocatedFileStatusFetcher to collect/publish IOStatistics | Minor | client | Steve Loughran | Steve Loughran | +| [HADOOP-16830](https://issues.apache.org/jira/browse/HADOOP-16830) | Add Public IOStatistics API | Major | fs, fs/s3 | Steve Loughran | Steve Loughran | +| [HDFS-15759](https://issues.apache.org/jira/browse/HDFS-15759) | EC: Verify EC reconstruction correctness on DataNode | Major | datanode, ec, erasure-coding | Toshihiko Uchida | Toshihiko Uchida | +| [HADOOP-16829](https://issues.apache.org/jira/browse/HADOOP-16829) | Über-jira: S3A Hadoop 3.3.1 features | Major | fs/s3 | Steve Loughran | Steve Loughran | + + +### IMPROVEMENTS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-15245](https://issues.apache.org/jira/browse/HDFS-15245) | Improve JournalNode web UI | Major | journal-node, ui | Jianfei Jiang | Jianfei Jiang | +| [HADOOP-16952](https://issues.apache.org/jira/browse/HADOOP-16952) | Add .diff to gitignore | Minor | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-16954](https://issues.apache.org/jira/browse/HADOOP-16954) | Add -S option in "Count" command to show only Snapshot Counts | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15247](https://issues.apache.org/jira/browse/HDFS-15247) | RBF: Provide Non DFS Used per DataNode in DataNode UI | Major | . | Ayush Saxena | Lisheng Sun | +| [MAPREDUCE-7199](https://issues.apache.org/jira/browse/MAPREDUCE-7199) | HsJobsBlock reuse JobACLsManager for checkAccess | Minor | . | Bibin Chundatt | Bilwa S T | +| [HDFS-15295](https://issues.apache.org/jira/browse/HDFS-15295) | AvailableSpaceBlockPlacementPolicy should use chooseRandomWithStorageTypeTwoTrial() for better performance. | Minor | . | Jinglun | Jinglun | +| [HADOOP-16054](https://issues.apache.org/jira/browse/HADOOP-16054) | Update Dockerfile to use Bionic | Major | build, test | Akira Ajisaka | Akira Ajisaka | +| [YARN-10237](https://issues.apache.org/jira/browse/YARN-10237) | Add isAbsoluteResource config for queue in scheduler response | Minor | scheduler | Prabhu Joseph | Prabhu Joseph | +| [HADOOP-16886](https://issues.apache.org/jira/browse/HADOOP-16886) | Add hadoop.http.idle\_timeout.ms to core-default.xml | Major | . | Wei-Chiu Chuang | Lisheng Sun | +| [HDFS-14283](https://issues.apache.org/jira/browse/HDFS-14283) | DFSInputStream to prefer cached replica | Major | . | Wei-Chiu Chuang | Lisheng Sun | +| [HDFS-15338](https://issues.apache.org/jira/browse/HDFS-15338) | listOpenFiles() should throw InvalidPathException in case of invalid paths | Minor | . | Jinglun | Jinglun | +| [YARN-10160](https://issues.apache.org/jira/browse/YARN-10160) | Add auto queue creation related configs to RMWebService#CapacitySchedulerQueueInfo | Major | . | Prabhu Joseph | Prabhu Joseph | +| [HDFS-15255](https://issues.apache.org/jira/browse/HDFS-15255) | Consider StorageType when DatanodeManager#sortLocatedBlock() | Major | . 
| Lisheng Sun | Lisheng Sun | +| [YARN-10260](https://issues.apache.org/jira/browse/YARN-10260) | Allow transitioning queue from DRAINING to RUNNING state | Major | . | Jonathan Hung | Bilwa S T | +| [HADOOP-17036](https://issues.apache.org/jira/browse/HADOOP-17036) | TestFTPFileSystem failing as ftp server dir already exists | Minor | fs, test | Steve Loughran | Mikhail Pryakhin | +| [HDFS-15356](https://issues.apache.org/jira/browse/HDFS-15356) | Unify configuration \`dfs.ha.allow.stale.reads\` to DFSConfigKeys | Major | hdfs | Xiaoqiao He | Xiaoqiao He | +| [HDFS-15358](https://issues.apache.org/jira/browse/HDFS-15358) | RBF: Unify router datanode UI with namenode datanode UI | Major | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-17042](https://issues.apache.org/jira/browse/HADOOP-17042) | Hadoop distcp throws "ERROR: Tools helper ///usr/lib/hadoop/libexec/tools/hadoop-distcp.sh was not found" | Minor | tools/distcp | Aki Tanaka | Aki Tanaka | +| [HDFS-15202](https://issues.apache.org/jira/browse/HDFS-15202) | HDFS-client: boost ShortCircuit Cache | Minor | dfsclient | Danil Lipovoy | Danil Lipovoy | +| [HDFS-15207](https://issues.apache.org/jira/browse/HDFS-15207) | VolumeScanner skip to scan blocks accessed during recent scan peroid | Minor | datanode | Yang Yun | Yang Yun | +| [HDFS-14999](https://issues.apache.org/jira/browse/HDFS-14999) | Avoid Potential Infinite Loop in DFSNetworkTopology | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-13639](https://issues.apache.org/jira/browse/HDFS-13639) | SlotReleaser is not fast enough | Major | hdfs-client | Gang Xie | Lisheng Sun | +| [HDFS-15369](https://issues.apache.org/jira/browse/HDFS-15369) | Refactor method VolumeScanner#runLoop() | Minor | datanode | Yang Yun | Yang Yun | +| [HADOOP-14698](https://issues.apache.org/jira/browse/HADOOP-14698) | Make copyFromLocal's -t option available for put as well | Major | . | Andras Bokor | Andras Bokor | +| [HDFS-10792](https://issues.apache.org/jira/browse/HDFS-10792) | RedundantEditLogInputStream should log caught exceptions | Minor | namenode | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [YARN-6492](https://issues.apache.org/jira/browse/YARN-6492) | Generate queue metrics for each partition | Major | capacity scheduler | Jonathan Hung | Manikandan R | +| [HADOOP-17016](https://issues.apache.org/jira/browse/HADOOP-17016) | Adding Common Counters in ABFS | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh | +| [HADOOP-16828](https://issues.apache.org/jira/browse/HADOOP-16828) | Zookeeper Delegation Token Manager fetch sequence number by batch | Major | . | Fengnan Li | Fengnan Li | +| [HADOOP-14566](https://issues.apache.org/jira/browse/HADOOP-14566) | Add seek support for SFTP FileSystem | Minor | fs | Azhagu Selvan SP | Mikhail Pryakhin | +| [HADOOP-17047](https://issues.apache.org/jira/browse/HADOOP-17047) | TODO comments exist in trunk while the related issues are already fixed. | Trivial | . | Rungroj Maipradit | Rungroj Maipradit | +| [HADOOP-17020](https://issues.apache.org/jira/browse/HADOOP-17020) | Improve RawFileSystem Performance | Minor | fs | Rajesh Balamohan | Mehakmeet Singh | +| [HDFS-15406](https://issues.apache.org/jira/browse/HDFS-15406) | Improve the speed of Datanode Block Scan | Major | . 
| Hemanth Boyina | Hemanth Boyina | +| [HADOOP-17090](https://issues.apache.org/jira/browse/HADOOP-17090) | Increase precommit job timeout from 5 hours to 20 hours | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17084](https://issues.apache.org/jira/browse/HADOOP-17084) | Update Dockerfile\_aarch64 to use Bionic | Major | build, test | RuiChen | zhaorenhai | +| [YARN-8047](https://issues.apache.org/jira/browse/YARN-8047) | RMWebApp make external class pluggable | Minor | . | Bibin Chundatt | Bilwa S T | +| [YARN-10297](https://issues.apache.org/jira/browse/YARN-10297) | TestContinuousScheduling#testFairSchedulerContinuousSchedulingInitTime fails intermittently | Major | . | Jonathan Hung | Jim Brennan | +| [HADOOP-17127](https://issues.apache.org/jira/browse/HADOOP-17127) | Use RpcMetrics.TIMEUNIT to initialize rpc queueTime and processingTime | Minor | common | Jim Brennan | Jim Brennan | +| [HDFS-15404](https://issues.apache.org/jira/browse/HDFS-15404) | ShellCommandFencer should expose info about source | Major | . | Chen Liang | Chen Liang | +| [HADOOP-17147](https://issues.apache.org/jira/browse/HADOOP-17147) | Dead link in hadoop-kms/index.md.vm | Minor | documentation, kms | Akira Ajisaka | Xieming Li | +| [HADOOP-17113](https://issues.apache.org/jira/browse/HADOOP-17113) | Adding ReadAhead Counters in ABFS | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh | +| [YARN-10343](https://issues.apache.org/jira/browse/YARN-10343) | Legacy RM UI should include labeled metrics for allocated, total, and reserved resources. | Major | . | Eric Payne | Eric Payne | +| [YARN-1529](https://issues.apache.org/jira/browse/YARN-1529) | Add Localization overhead metrics to NM | Major | nodemanager | Gera Shegalov | Jim Brennan | +| [YARN-10361](https://issues.apache.org/jira/browse/YARN-10361) | Make custom DAO classes configurable into RMWebApp#JAXBContextResolver | Major | . | Prabhu Joseph | Bilwa S T | +| [YARN-10251](https://issues.apache.org/jira/browse/YARN-10251) | Show extended resources on legacy RM UI. | Major | . | Eric Payne | Eric Payne | +| [HDFS-15493](https://issues.apache.org/jira/browse/HDFS-15493) | Update block map and name cache in parallel while loading fsimage. | Major | namenode | Chengwei Wang | Chengwei Wang | +| [HADOOP-17057](https://issues.apache.org/jira/browse/HADOOP-17057) | ABFS driver enhancement - Allow customizable translation from AAD SPNs and security groups to Linux user and group | Major | fs/azure | Karthik Amarnath | Karthik Amarnath | +| [HADOOP-17194](https://issues.apache.org/jira/browse/HADOOP-17194) | Adding Context class for AbfsClient to pass AbfsConfigurations to limit number of parameters | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh | +| [HADOOP-17065](https://issues.apache.org/jira/browse/HADOOP-17065) | Adding Network Counters in ABFS | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh | +| [HADOOP-17159](https://issues.apache.org/jira/browse/HADOOP-17159) | Make UGI support forceful relogin from keytab ignoring the last login time | Major | security | Sandeep Guggilam | Sandeep Guggilam | +| [YARN-10407](https://issues.apache.org/jira/browse/YARN-10407) | Add phantomjsdriver.log to gitignore | Minor | . 
| Takanobu Asanuma | Takanobu Asanuma | +| [YARN-10353](https://issues.apache.org/jira/browse/YARN-10353) | Log vcores used and cumulative cpu in containers monitor | Minor | yarn | Jim Brennan | Jim Brennan | +| [YARN-10369](https://issues.apache.org/jira/browse/YARN-10369) | Make NMTokenSecretManagerInRM sending NMToken for nodeId DEBUG | Minor | yarn | Jim Brennan | Jim Brennan | +| [YARN-10390](https://issues.apache.org/jira/browse/YARN-10390) | LeafQueue: retain user limits cache across assignContainers() calls | Major | capacity scheduler, capacityscheduler | Muhammad Samir Khan | Muhammad Samir Khan | +| [HDFS-15574](https://issues.apache.org/jira/browse/HDFS-15574) | Remove unnecessary sort of block list in DirectoryScanner | Major | . | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-17270](https://issues.apache.org/jira/browse/HADOOP-17270) | Fix testCompressorDecompressorWithExeedBufferLimit to cover the intended scenario | Minor | test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15581](https://issues.apache.org/jira/browse/HDFS-15581) | Access Controlled HTTPFS Proxy | Minor | httpfs | Richard | Richard | +| [HADOOP-17283](https://issues.apache.org/jira/browse/HADOOP-17283) | Hadoop - Upgrade to JQuery 3.5.1 | Major | . | Aryan Gupta | Aryan Gupta | +| [HADOOP-17267](https://issues.apache.org/jira/browse/HADOOP-17267) | Add debug-level logs in Filesystem#close | Minor | fs | Karen Coppage | Karen Coppage | +| [HADOOP-17284](https://issues.apache.org/jira/browse/HADOOP-17284) | Support BCFKS keystores for Hadoop Credential Provider | Major | . | Xiaoyu Yao | Xiaoyu Yao | +| [HDFS-15415](https://issues.apache.org/jira/browse/HDFS-15415) | Reduce locking in Datanode DirectoryScanner | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [YARN-10451](https://issues.apache.org/jira/browse/YARN-10451) | RM (v1) UI NodesPage can NPE when yarn.io/gpu resource type is defined. | Major | . | Eric Payne | Eric Payne | +| [HADOOP-17021](https://issues.apache.org/jira/browse/HADOOP-17021) | Add concat fs command | Minor | fs | Jinglun | Jinglun | +| [MAPREDUCE-7301](https://issues.apache.org/jira/browse/MAPREDUCE-7301) | Expose Mini MR Cluster attribute for testing | Minor | test | Swaroopa Kadam | Swaroopa Kadam | +| [HDFS-15567](https://issues.apache.org/jira/browse/HDFS-15567) | [SBN Read] HDFS should expose msync() API to allow downstream applications call it explicitly. | Major | ha, hdfs-client | Konstantin Shvachko | Konstantin Shvachko | +| [HDFS-15633](https://issues.apache.org/jira/browse/HDFS-15633) | Avoid redundant RPC calls for getDiskStatus | Major | dfsclient | Ayush Saxena | Ayush Saxena | +| [YARN-10450](https://issues.apache.org/jira/browse/YARN-10450) | Add cpu and memory utilization per node and cluster-wide metrics | Minor | yarn | Jim Brennan | Jim Brennan | +| [HADOOP-17302](https://issues.apache.org/jira/browse/HADOOP-17302) | Upgrade to jQuery 3.5.1 in hadoop-sls | Major | . 
| Aryan Gupta | Aryan Gupta | +| [HDFS-15652](https://issues.apache.org/jira/browse/HDFS-15652) | Make block size from NNThroughputBenchmark configurable | Minor | benchmarks | Hui Fei | Hui Fei | +| [YARN-10475](https://issues.apache.org/jira/browse/YARN-10475) | Scale RM-NM heartbeat interval based on node utilization | Minor | yarn | Jim Brennan | Jim Brennan | +| [HDFS-15665](https://issues.apache.org/jira/browse/HDFS-15665) | Balancer logging improvement | Major | balancer & mover | Konstantin Shvachko | Konstantin Shvachko | +| [HADOOP-17342](https://issues.apache.org/jira/browse/HADOOP-17342) | Creating a token identifier should not do kerberos name resolution | Major | common | Jim Brennan | Jim Brennan | +| [YARN-10479](https://issues.apache.org/jira/browse/YARN-10479) | RMProxy should retry on SocketTimeout Exceptions | Major | yarn | Jim Brennan | Jim Brennan | +| [HDFS-15623](https://issues.apache.org/jira/browse/HDFS-15623) | Respect configured values of rpc.engine | Major | hdfs | Hector Sandoval Chaverri | Hector Sandoval Chaverri | +| [HADOOP-17369](https://issues.apache.org/jira/browse/HADOOP-17369) | Bump up snappy-java to 1.1.8.1 | Minor | . | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-10480](https://issues.apache.org/jira/browse/YARN-10480) | replace href tags with ng-href | Trivial | . | Gabriel Medeiros Coelho | Gabriel Medeiros Coelho | +| [HADOOP-17367](https://issues.apache.org/jira/browse/HADOOP-17367) | Add InetAddress api to ProxyUsers.authorize | Major | performance, security | Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7304](https://issues.apache.org/jira/browse/MAPREDUCE-7304) | Enhance the map-reduce Job end notifier to be able to notify the given URL via a custom class | Major | mrv2 | Daniel Fritsi | Zoltán Erdmann | +| [MAPREDUCE-7309](https://issues.apache.org/jira/browse/MAPREDUCE-7309) | Improve performance of reading resource request for mapper/reducers from config | Major | applicationmaster | Wangda Tan | Peter Bacsko | +| [HDFS-15694](https://issues.apache.org/jira/browse/HDFS-15694) | Avoid calling UpdateHeartBeatState inside DataNodeDescriptor | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15703](https://issues.apache.org/jira/browse/HDFS-15703) | Don't generate edits for set operations that are no-op | Major | namenode | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17392](https://issues.apache.org/jira/browse/HADOOP-17392) | Remote exception messages should not include the exception class | Major | ipc | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15706](https://issues.apache.org/jira/browse/HDFS-15706) | HttpFS: Log more information on request failures | Major | httpfs | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17389](https://issues.apache.org/jira/browse/HADOOP-17389) | KMS should log full UGI principal | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17425](https://issues.apache.org/jira/browse/HADOOP-17425) | Bump up snappy-java to 1.1.8.2 | Minor | . | L. C. Hsieh | L. C. 
Hsieh | +| [HDFS-15720](https://issues.apache.org/jira/browse/HDFS-15720) | namenode audit async logger should add some log4j config | Minor | hdfs | Max Xie | | +| [HDFS-15717](https://issues.apache.org/jira/browse/HDFS-15717) | Improve fsck logging | Major | logging, namenode | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15704](https://issues.apache.org/jira/browse/HDFS-15704) | Mitigate lease monitor's rapid infinite loop | Major | namenode | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15569](https://issues.apache.org/jira/browse/HDFS-15569) | Speed up the Storage#doRecover during datanode rolling upgrade | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15751](https://issues.apache.org/jira/browse/HDFS-15751) | Add documentation for msync() API to filesystem.md | Major | documentation | Konstantin Shvachko | Konstantin Shvachko | +| [HADOOP-17454](https://issues.apache.org/jira/browse/HADOOP-17454) | [s3a] Disable bucket existence check - set fs.s3a.bucket.probe to 0 | Major | . | Gabor Bota | Gabor Bota | +| [YARN-10538](https://issues.apache.org/jira/browse/YARN-10538) | Add recommissioning nodes to the list of updated nodes returned to the AM | Major | . | Srinivas S T | Srinivas S T | +| [YARN-10541](https://issues.apache.org/jira/browse/YARN-10541) | capture the performance metrics of ZKRMStateStore | Minor | resourcemanager | Max Xie | Max Xie | +| [HADOOP-17408](https://issues.apache.org/jira/browse/HADOOP-17408) | Optimize NetworkTopology while sorting of block locations | Major | common, net | Ahmed Hussein | Ahmed Hussein | +| [YARN-4589](https://issues.apache.org/jira/browse/YARN-4589) | Diagnostics for localization timeouts is lacking | Major | . | Chang Li | Chang Li | +| [YARN-10562](https://issues.apache.org/jira/browse/YARN-10562) | Follow up changes for YARN-9833 | Major | yarn | Jim Brennan | Jim Brennan | +| [HDFS-15783](https://issues.apache.org/jira/browse/HDFS-15783) | Speed up BlockPlacementPolicyRackFaultTolerant#verifyBlockPlacement | Major | block placement | Akira Ajisaka | Akira Ajisaka | +| [YARN-10519](https://issues.apache.org/jira/browse/YARN-10519) | Refactor QueueMetricsForCustomResources class to move to yarn-common package | Major | . 
| Minni Mittal | Minni Mittal | +| [HADOOP-17484](https://issues.apache.org/jira/browse/HADOOP-17484) | Typo in hadop-aws index.md | Trivial | documentation, fs/s3 | Maksim | Maksim | +| [HADOOP-17478](https://issues.apache.org/jira/browse/HADOOP-17478) | Improve the description of hadoop.http.authentication.signature.secret.file | Minor | documentation | Akira Ajisaka | Akira Ajisaka | +| [MAPREDUCE-7317](https://issues.apache.org/jira/browse/MAPREDUCE-7317) | Add latency information in FileOutputCommitter.mergePaths | Minor | client | Jungtaek Lim | Jungtaek Lim | +| [HDFS-15789](https://issues.apache.org/jira/browse/HDFS-15789) | Lease renewal does not require namesystem lock | Major | hdfs | Jim Brennan | Jim Brennan | +| [HADOOP-17501](https://issues.apache.org/jira/browse/HADOOP-17501) | Fix logging typo in ShutdownHookManager | Major | common | Konstantin Shvachko | Fengnan Li | +| [HADOOP-17354](https://issues.apache.org/jira/browse/HADOOP-17354) | Move Jenkinsfile outside of the root directory | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17508](https://issues.apache.org/jira/browse/HADOOP-17508) | Simplify dependency installation instructions | Trivial | documentation | Gautham Banasandra | Gautham Banasandra | +| [HADOOP-17509](https://issues.apache.org/jira/browse/HADOOP-17509) | Parallelize building of dependencies | Minor | build | Gautham Banasandra | Gautham Banasandra | +| [HDFS-15799](https://issues.apache.org/jira/browse/HDFS-15799) | Make DisallowedDatanodeException terse | Minor | hdfs | Richard | Richard | +| [HDFS-15813](https://issues.apache.org/jira/browse/HDFS-15813) | DataStreamer: keep sending heartbeat packets while streaming | Major | hdfs | Jim Brennan | Jim Brennan | +| [MAPREDUCE-7319](https://issues.apache.org/jira/browse/MAPREDUCE-7319) | Log list of mappers at trace level in ShuffleHandler audit log | Minor | yarn | Jim Brennan | Jim Brennan | +| [HADOOP-14402](https://issues.apache.org/jira/browse/HADOOP-14402) | roll out StreamCapabilities across output streams of all filesystems | Major | fs, fs/adl, fs/azure, fs/oss, fs/s3, fs/swift | Steve Loughran | Steve Loughran | +| [HDFS-15821](https://issues.apache.org/jira/browse/HDFS-15821) | Add metrics for in-service datanodes | Minor | . | Zehao Chen | Zehao Chen | +| [YARN-10626](https://issues.apache.org/jira/browse/YARN-10626) | Log resource allocation in NM log at container start time | Major | . | Eric Badger | Eric Badger | +| [HDFS-15815](https://issues.apache.org/jira/browse/HDFS-15815) | if required storageType are unavailable, log the failed reason during choosing Datanode | Minor | block placement | Yang Yun | Yang Yun | +| [HDFS-15830](https://issues.apache.org/jira/browse/HDFS-15830) | Support to make dfs.image.parallel.load reconfigurable | Major | namenode | Hui Fei | Hui Fei | +| [HDFS-15835](https://issues.apache.org/jira/browse/HDFS-15835) | Erasure coding: Add/remove logs for the better readability/debugging | Minor | erasure-coding, hdfs | Bhavik Patel | Bhavik Patel | +| [HDFS-15826](https://issues.apache.org/jira/browse/HDFS-15826) | Solve the problem of incorrect progress of delegation tokens when loading FsImage | Major | . 
| JiangHua Zhu | JiangHua Zhu | +| [HDFS-15734](https://issues.apache.org/jira/browse/HDFS-15734) | [READ] DirectoryScanner#scan need not check StorageType.PROVIDED | Minor | datanode | Yuxuan Wang | Yuxuan Wang | +| [HADOOP-17538](https://issues.apache.org/jira/browse/HADOOP-17538) | Add kms-default.xml and httpfs-default.xml to site index | Minor | documentation | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-10613](https://issues.apache.org/jira/browse/YARN-10613) | Config to allow Intra- and Inter-queue preemption to enable/disable conservativeDRF | Minor | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne | +| [YARN-10653](https://issues.apache.org/jira/browse/YARN-10653) | Fixed the findbugs issues introduced by YARN-10647. | Major | . | Qi Zhu | Qi Zhu | +| [MAPREDUCE-7324](https://issues.apache.org/jira/browse/MAPREDUCE-7324) | ClientHSSecurityInfo class is in wrong META-INF file | Major | . | Eric Badger | Eric Badger | +| [HADOOP-17546](https://issues.apache.org/jira/browse/HADOOP-17546) | Update Description of hadoop-http-auth-signature-secret in HttpAuthentication.md | Minor | . | Ravuri Sushma sree | Ravuri Sushma sree | +| [YARN-10664](https://issues.apache.org/jira/browse/YARN-10664) | Allow parameter expansion in NM\_ADMIN\_USER\_ENV | Major | yarn | Jim Brennan | Jim Brennan | +| [HADOOP-17570](https://issues.apache.org/jira/browse/HADOOP-17570) | Apply YETUS-1102 to re-enable GitHub comments | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17594](https://issues.apache.org/jira/browse/HADOOP-17594) | DistCp: Expose the JobId for applications executing through run method | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15907](https://issues.apache.org/jira/browse/HDFS-15907) | Reduce Memory Overhead of AclFeature by avoiding AtomicInteger | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-15911](https://issues.apache.org/jira/browse/HDFS-15911) | Provide blocks moved count in Balancer iteration result | Major | balancer & mover | Viraj Jasani | Viraj Jasani | +| [HDFS-15919](https://issues.apache.org/jira/browse/HDFS-15919) | BlockPoolManager should log stack trace if unable to get Namenode addresses | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-17531](https://issues.apache.org/jira/browse/HADOOP-17531) | DistCp: Reduce memory usage on copying huge directories | Critical | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15879](https://issues.apache.org/jira/browse/HDFS-15879) | Exclude slow nodes when choose targets for blocks | Major | . | tomscut | tomscut | +| [HADOOP-16870](https://issues.apache.org/jira/browse/HADOOP-16870) | Use spotbugs-maven-plugin instead of findbugs-maven-plugin | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17222](https://issues.apache.org/jira/browse/HADOOP-17222) | Create socket address leveraging URI cache | Major | common, hdfs-client | fanrui | fanrui | +| [HDFS-15932](https://issues.apache.org/jira/browse/HDFS-15932) | Improve the balancer error message when process exits abnormally. | Major | . | Renukaprasad C | Renukaprasad C | +| [HADOOP-16524](https://issues.apache.org/jira/browse/HADOOP-16524) | Automatic keystore reloading for HttpServer2 | Major | . | Kihwal Lee | Borislav Iordanov | +| [HDFS-15931](https://issues.apache.org/jira/browse/HDFS-15931) | Fix non-static inner classes for better memory management | Major | . 
| Viraj Jasani | Viraj Jasani | +| [HADOOP-17371](https://issues.apache.org/jira/browse/HADOOP-17371) | Bump Jetty to the latest version 9.4.35 | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-15942](https://issues.apache.org/jira/browse/HDFS-15942) | Increase Quota initialization threads | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-17613](https://issues.apache.org/jira/browse/HADOOP-17613) | Log not flushed fully when daemon shutdown | Major | common | Renukaprasad C | Renukaprasad C | +| [HDFS-15937](https://issues.apache.org/jira/browse/HDFS-15937) | Reduce memory used during datanode layout upgrade | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-15160](https://issues.apache.org/jira/browse/HDFS-15160) | ReplicaMap, Disk Balancer, Directory Scanner and various FsDatasetImpl methods should use datanode readlock | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-17569](https://issues.apache.org/jira/browse/HADOOP-17569) | Building native code fails on Fedora 33 | Major | build, common | Kengo Seki | Masatake Iwasaki | +| [HADOOP-17633](https://issues.apache.org/jira/browse/HADOOP-17633) | Please upgrade json-smart dependency to the latest version | Major | auth, build | helen huang | Viraj Jasani | +| [HADOOP-17620](https://issues.apache.org/jira/browse/HADOOP-17620) | DistCp: Use Iterator for listing target directory as well | Major | . | Ayush Saxena | Ayush Saxena | +| [YARN-10743](https://issues.apache.org/jira/browse/YARN-10743) | Add a policy for not aggregating for containers which are killed because exceeding container log size limit. | Major | . | Qi Zhu | Qi Zhu | +| [HDFS-15967](https://issues.apache.org/jira/browse/HDFS-15967) | Improve the log for Short Circuit Local Reads | Minor | . | Bhavik Patel | Bhavik Patel | +| [HADOOP-17675](https://issues.apache.org/jira/browse/HADOOP-17675) | LdapGroupsMapping$LdapSslSocketFactory ClassNotFoundException | Major | common | Tamas Mate | István Fajth | +| [HADOOP-11616](https://issues.apache.org/jira/browse/HADOOP-11616) | Remove workaround for Curator's ChildReaper requiring Guava 15+ | Major | . | Robert Kanter | Viraj Jasani | +| [HDFS-16003](https://issues.apache.org/jira/browse/HDFS-16003) | ProcessReport print invalidatedBlocks should judge debug level at first | Minor | namanode | lei w | lei w | +| [HDFS-16007](https://issues.apache.org/jira/browse/HDFS-16007) | Deserialization of ReplicaState should avoid throwing ArrayIndexOutOfBoundsException | Major | . | junwen yang | Viraj Jasani | +| [HADOOP-17615](https://issues.apache.org/jira/browse/HADOOP-17615) | ADLFS: Update SDK version from 2.3.6 to 2.3.9 | Minor | fs/adl | Bilahari T H | Bilahari T H | +| [HADOOP-16822](https://issues.apache.org/jira/browse/HADOOP-16822) | Provide source artifacts for hadoop-client-api | Major | . | Karel Kolman | Karel Kolman | +| [HADOOP-17680](https://issues.apache.org/jira/browse/HADOOP-17680) | Allow ProtobufRpcEngine to be extensible | Major | common | Hector Sandoval Chaverri | Hector Sandoval Chaverri | +| [YARN-10258](https://issues.apache.org/jira/browse/YARN-10258) | Add metrics for 'ApplicationsRunning' in NodeManager | Minor | nodemanager | ANANDA G B | ANANDA G B | +| [HDFS-15790](https://issues.apache.org/jira/browse/HDFS-15790) | Make ProtobufRpcEngineProtos and ProtobufRpcEngineProtos2 Co-Exist | Critical | . 
| David Mollitor | Vinayakumar B | + + +### BUG FIXES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-15196](https://issues.apache.org/jira/browse/HDFS-15196) | RBF: RouterRpcServer getListing cannot list large dirs correctly | Critical | . | Fengnan Li | Fengnan Li | +| [HDFS-15252](https://issues.apache.org/jira/browse/HDFS-15252) | HttpFS: setWorkingDirectory should not accept invalid paths | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15249](https://issues.apache.org/jira/browse/HDFS-15249) | ThrottledAsyncChecker is not thread-safe. | Major | federation | Toshihiro Suzuki | Toshihiro Suzuki | +| [HDFS-15266](https://issues.apache.org/jira/browse/HDFS-15266) | Add missing DFSOps Statistics in WebHDFS | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15275](https://issues.apache.org/jira/browse/HDFS-15275) | HttpFS: Response of Create was not correct with noredirect and data are true | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15281](https://issues.apache.org/jira/browse/HDFS-15281) | ZKFC ignores dfs.namenode.rpc-bind-host and uses dfs.namenode.rpc-address to bind to host address | Major | ha, namenode | Dhiraj Hegde | Dhiraj Hegde | +| [HDFS-15297](https://issues.apache.org/jira/browse/HDFS-15297) | TestNNHandlesBlockReportPerStorage::blockReport\_02 fails intermittently in trunk | Major | datanode, test | Mingliang Liu | Ayush Saxena | +| [HDFS-15210](https://issues.apache.org/jira/browse/HDFS-15210) | EC : File write hanged when DN is shutdown by admin command. | Major | ec | Surendra Singh Lilhore | Surendra Singh Lilhore | +| [HDFS-15285](https://issues.apache.org/jira/browse/HDFS-15285) | The same distance and load nodes don't shuffle when consider DataNode load | Major | . | Lisheng Sun | Lisheng Sun | +| [HDFS-15265](https://issues.apache.org/jira/browse/HDFS-15265) | HttpFS: validate content-type in HttpFSUtils | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15320](https://issues.apache.org/jira/browse/HDFS-15320) | StringIndexOutOfBoundsException in HostRestrictingAuthorizationFilter | Major | webhdfs | Akira Ajisaka | Akira Ajisaka | +| [YARN-10256](https://issues.apache.org/jira/browse/YARN-10256) | Refactor TestContainerSchedulerQueuing.testContainerUpdateExecTypeGuaranteedToOpportunistic | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15270](https://issues.apache.org/jira/browse/HDFS-15270) | Account for \*env == NULL in hdfsThreadDestructor | Major | . | Babneet Singh | Babneet Singh | +| [HDFS-15331](https://issues.apache.org/jira/browse/HDFS-15331) | Remove invalid exclusions that minicluster dependency on HDFS | Major | . | Wanqiang Ji | Wanqiang Ji | +| [YARN-8959](https://issues.apache.org/jira/browse/YARN-8959) | TestContainerResizing fails randomly | Minor | . | Bibin Chundatt | Ahmed Hussein | +| [HDFS-15332](https://issues.apache.org/jira/browse/HDFS-15332) | Quota Space consumed was wrong in truncate with Snapshots | Major | . | Hemanth Boyina | Hemanth Boyina | +| [YARN-9017](https://issues.apache.org/jira/browse/YARN-9017) | PlacementRule order is not maintained in CS | Major | . 
| Bibin Chundatt | Bilwa S T | +| [HADOOP-17025](https://issues.apache.org/jira/browse/HADOOP-17025) | Fix invalid metastore configuration in S3GuardTool tests | Minor | fs/s3, test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15339](https://issues.apache.org/jira/browse/HDFS-15339) | TestHDFSCLI fails for user names with the dot/dash character | Major | test | Yan Xiaole | Yan Xiaole | +| [HDFS-15250](https://issues.apache.org/jira/browse/HDFS-15250) | Setting \`dfs.client.use.datanode.hostname\` to true can crash the system because of unhandled UnresolvedAddressException | Major | . | Ctest | Ctest | +| [HADOOP-16768](https://issues.apache.org/jira/browse/HADOOP-16768) | SnappyCompressor test cases wrongly assume that the compressed data is always smaller than the input data | Major | io, test | zhao bo | Akira Ajisaka | +| [HDFS-1820](https://issues.apache.org/jira/browse/HDFS-1820) | FTPFileSystem attempts to close the outputstream even when it is not initialised | Major | hdfs-client | Sudharsan Sampath | Mikhail Pryakhin | +| [HDFS-15243](https://issues.apache.org/jira/browse/HDFS-15243) | Add an option to prevent sub-directories of protected directories from deletion | Major | 3.1.1 | liuyanyu | liuyanyu | +| [HDFS-14367](https://issues.apache.org/jira/browse/HDFS-14367) | EC: Parameter maxPoolSize in striped reconstruct thread pool isn't affecting number of threads | Major | ec | Guo Lei | Guo Lei | +| [YARN-9301](https://issues.apache.org/jira/browse/YARN-9301) | Too many InvalidStateTransitionException with SLS | Major | . | Bibin Chundatt | Bilwa S T | +| [HDFS-15300](https://issues.apache.org/jira/browse/HDFS-15300) | RBF: updateActiveNamenode() is invalid when RPC address is IP | Major | . | xuzq | xuzq | +| [HDFS-15316](https://issues.apache.org/jira/browse/HDFS-15316) | Deletion failure should not remove directory from snapshottables | Major | . | Hemanth Boyina | Hemanth Boyina | +| [YARN-8942](https://issues.apache.org/jira/browse/YARN-8942) | PriorityBasedRouterPolicy throws exception if all sub-cluster weights have negative value | Minor | . | Akshay Agarwal | Bilwa S T | +| [HADOOP-17044](https://issues.apache.org/jira/browse/HADOOP-17044) | Revert "HADOOP-8143. Change distcp to have -pb on by default" | Major | tools/distcp | Steve Loughran | Steve Loughran | +| [HADOOP-17024](https://issues.apache.org/jira/browse/HADOOP-17024) | ListStatus on ViewFS root (ls "/") should list the linkFallBack root (configured target root). | Major | fs, viewfs | Uma Maheswara Rao G | Abhishek Das | +| [MAPREDUCE-6826](https://issues.apache.org/jira/browse/MAPREDUCE-6826) | Job fails with InvalidStateTransitonException: Invalid event: JOB\_TASK\_COMPLETED at SUCCEEDED/COMMITTING | Major | . | Varun Saxena | Bilwa S T | +| [HADOOP-16900](https://issues.apache.org/jira/browse/HADOOP-16900) | Very large files can be truncated when written through S3AFileSystem | Major | fs/s3 | Andrew Olson | Mukund Thakur | +| [HADOOP-17049](https://issues.apache.org/jira/browse/HADOOP-17049) | javax.activation-api and jakarta.activation-api define overlapping classes | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17040](https://issues.apache.org/jira/browse/HADOOP-17040) | Fix intermittent failure of ITestBlockingThreadPoolExecutorService | Minor | fs/s3, test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15363](https://issues.apache.org/jira/browse/HDFS-15363) | BlockPlacementPolicyWithNodeGroup should validate if it is initialized by NetworkTopologyWithNodeGroup | Major | . 
| Hemanth Boyina | Hemanth Boyina | +| [HDFS-15093](https://issues.apache.org/jira/browse/HDFS-15093) | RENAME.TO\_TRASH is ignored When RENAME.OVERWRITE is specified | Major | . | Harshakiran Reddy | Ayush Saxena | +| [HDFS-12288](https://issues.apache.org/jira/browse/HDFS-12288) | Fix DataNode's xceiver count calculation | Major | datanode, hdfs | Lukas Majercak | Lisheng Sun | +| [HDFS-15362](https://issues.apache.org/jira/browse/HDFS-15362) | FileWithSnapshotFeature#updateQuotaAndCollectBlocks should collect all distinct blocks | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HADOOP-7002](https://issues.apache.org/jira/browse/HADOOP-7002) | Wrong description of copyFromLocal and copyToLocal in documentation | Minor | . | Jingguo Yao | Andras Bokor | +| [HADOOP-17052](https://issues.apache.org/jira/browse/HADOOP-17052) | NetUtils.connect() throws unchecked exception (UnresolvedAddressException) causing clients to abort | Major | net | Dhiraj Hegde | Dhiraj Hegde | +| [HADOOP-17018](https://issues.apache.org/jira/browse/HADOOP-17018) | Intermittent failing of ITestAbfsStreamStatistics in ABFS | Minor | fs/azure, test | Mehakmeet Singh | Mehakmeet Singh | +| [YARN-10254](https://issues.apache.org/jira/browse/YARN-10254) | CapacityScheduler incorrect User Group Mapping after leaf queue change | Major | . | Gergely Pollák | Gergely Pollák | +| [HADOOP-17062](https://issues.apache.org/jira/browse/HADOOP-17062) | Fix shelldocs path in Jenkinsfile | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17056](https://issues.apache.org/jira/browse/HADOOP-17056) | shelldoc fails in hadoop-common | Major | build | Akira Ajisaka | Akira Ajisaka | +| [YARN-10286](https://issues.apache.org/jira/browse/YARN-10286) | PendingContainers bugs in the scheduler outputs | Critical | . | Adam Antal | Andras Gyori | +| [HDFS-15396](https://issues.apache.org/jira/browse/HDFS-15396) | Fix TestViewFileSystemOverloadSchemeHdfsFileSystemContract#testListStatusRootDir | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15386](https://issues.apache.org/jira/browse/HDFS-15386) | ReplicaNotFoundException keeps happening in DN after removing multiple DN's data directories | Major | . | Toshihiro Suzuki | Toshihiro Suzuki | +| [HDFS-15398](https://issues.apache.org/jira/browse/HDFS-15398) | EC: hdfs client hangs due to exception during addBlock | Critical | ec, hdfs-client | Hongbing Wang | Hongbing Wang | +| [YARN-10300](https://issues.apache.org/jira/browse/YARN-10300) | appMasterHost not set in RM ApplicationSummary when AM fails before first heartbeat | Major | . | Eric Badger | Eric Badger | +| [HADOOP-17059](https://issues.apache.org/jira/browse/HADOOP-17059) | ArrayIndexOfboundsException in ViewFileSystem#listStatus | Major | viewfs | Hemanth Boyina | Hemanth Boyina | +| [YARN-10296](https://issues.apache.org/jira/browse/YARN-10296) | Make ContainerPBImpl#getId/setId synchronized | Minor | . | Benjamin Teke | Benjamin Teke | +| [HADOOP-17060](https://issues.apache.org/jira/browse/HADOOP-17060) | listStatus and getFileStatus behave inconsistent in the case of ViewFs implementation for isDirectory | Major | viewfs | Srinivasu Majeti | Uma Maheswara Rao G | +| [YARN-10312](https://issues.apache.org/jira/browse/YARN-10312) | Add support for yarn logs -logFile to retain backward compatibility | Major | client | Jim Brennan | Jim Brennan | +| [HDFS-15351](https://issues.apache.org/jira/browse/HDFS-15351) | Blocks scheduled count was wrong on truncate | Major | . 
| Hemanth Boyina | Hemanth Boyina | +| [HDFS-15403](https://issues.apache.org/jira/browse/HDFS-15403) | NPE in FileIoProvider#transferToSocketFully | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15372](https://issues.apache.org/jira/browse/HDFS-15372) | Files in snapshots no longer see attribute provider permissions | Major | . | Stephen O'Donnell | Stephen O'Donnell | +| [MAPREDUCE-7281](https://issues.apache.org/jira/browse/MAPREDUCE-7281) | Fix NoClassDefFoundError on 'mapred minicluster' | Major | . | Masatake Iwasaki | Masatake Iwasaki | +| [HADOOP-17029](https://issues.apache.org/jira/browse/HADOOP-17029) | ViewFS does not return correct user/group and ACL | Major | fs, viewfs | Abhishek Das | Abhishek Das | +| [HDFS-14546](https://issues.apache.org/jira/browse/HDFS-14546) | Document block placement policies | Major | . | Íñigo Goiri | Amithsha | +| [HADOOP-17068](https://issues.apache.org/jira/browse/HADOOP-17068) | client fails forever when namenode ipaddr changed | Major | hdfs-client | Sean Chow | Sean Chow | +| [HADOOP-17089](https://issues.apache.org/jira/browse/HADOOP-17089) | WASB: Update azure-storage-java SDK | Critical | fs/azure | Thomas Marqardt | Thomas Marqardt | +| [HDFS-15378](https://issues.apache.org/jira/browse/HDFS-15378) | TestReconstructStripedFile#testErasureCodingWorkerXmitsWeight is failing on trunk | Major | . | Hemanth Boyina | Hemanth Boyina | +| [YARN-9903](https://issues.apache.org/jira/browse/YARN-9903) | Support reservations continue looking for Node Labels | Major | . | Tarun Parimi | Jim Brennan | +| [YARN-10331](https://issues.apache.org/jira/browse/YARN-10331) | Upgrade node.js to 10.21.0 | Critical | build, yarn-ui-v2 | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17032](https://issues.apache.org/jira/browse/HADOOP-17032) | Handle an internal dir in viewfs having multiple children mount points pointing to different filesystems | Major | fs, viewfs | Abhishek Das | Abhishek Das | +| [YARN-10318](https://issues.apache.org/jira/browse/YARN-10318) | ApplicationHistory Web UI incorrect column indexing | Minor | yarn | Andras Gyori | Andras Gyori | +| [YARN-10330](https://issues.apache.org/jira/browse/YARN-10330) | Add missing test scenarios to TestUserGroupMappingPlacementRule and TestAppNameMappingPlacementRule | Major | capacity scheduler, capacityscheduler, test | Peter Bacsko | Peter Bacsko | +| [HDFS-15446](https://issues.apache.org/jira/browse/HDFS-15446) | CreateSnapshotOp fails during edit log loading for /.reserved/raw/path with error java.io.FileNotFoundException: Directory does not exist: /.reserved/raw/path | Major | hdfs | Srinivasu Majeti | Stephen O'Donnell | +| [HADOOP-17081](https://issues.apache.org/jira/browse/HADOOP-17081) | MetricsSystem doesn't start the sink adapters on restart | Minor | metrics | Madhusoodan | Madhusoodan | +| [HDFS-15451](https://issues.apache.org/jira/browse/HDFS-15451) | Restarting name node stuck in safe mode when using provided storage | Major | namenode | shanyu zhao | shanyu zhao | +| [HADOOP-17117](https://issues.apache.org/jira/browse/HADOOP-17117) | Fix typos in hadoop-aws documentation | Trivial | documentation, fs/s3 | Sebastian Nagel | Sebastian Nagel | +| [HADOOP-17120](https://issues.apache.org/jira/browse/HADOOP-17120) | Fix failure of docker image creation due to pip2 install error | Major | . 
| Masatake Iwasaki | Masatake Iwasaki | +| [YARN-10344](https://issues.apache.org/jira/browse/YARN-10344) | Sync netty versions in hadoop-yarn-csi | Major | build | Akira Ajisaka | Akira Ajisaka | +| [YARN-10341](https://issues.apache.org/jira/browse/YARN-10341) | Yarn Service Container Completed event doesn't get processed | Critical | . | Bilwa S T | Bilwa S T | +| [HADOOP-16998](https://issues.apache.org/jira/browse/HADOOP-16998) | WASB : NativeAzureFsOutputStream#close() throwing IllegalArgumentException | Major | fs/azure | Anoop Sam John | Anoop Sam John | +| [YARN-10348](https://issues.apache.org/jira/browse/YARN-10348) | Allow RM to always cancel tokens after app completes | Major | yarn | Jim Brennan | Jim Brennan | +| [MAPREDUCE-7284](https://issues.apache.org/jira/browse/MAPREDUCE-7284) | TestCombineFileInputFormat#testMissingBlocks fails | Major | test | Akira Ajisaka | Akira Ajisaka | +| [HDFS-14498](https://issues.apache.org/jira/browse/HDFS-14498) | LeaseManager can loop forever on the file for which create has failed | Major | namenode | Sergey Shelukhin | Stephen O'Donnell | +| [HADOOP-17130](https://issues.apache.org/jira/browse/HADOOP-17130) | Configuration.getValByRegex() shouldn't update the results while fetching. | Major | common | Mukund Thakur | Mukund Thakur | +| [HDFS-15198](https://issues.apache.org/jira/browse/HDFS-15198) | RBF: Add test for MountTableRefresherService failed to refresh other router MountTableEntries in secure mode | Major | rbf | zhengchenyu | zhengchenyu | +| [HADOOP-17119](https://issues.apache.org/jira/browse/HADOOP-17119) | Jetty upgrade to 9.4.x causes MR app fail with IOException | Major | . | Bilwa S T | Bilwa S T | +| [HDFS-15246](https://issues.apache.org/jira/browse/HDFS-15246) | ArrayIndexOfboundsException in BlockManager CreateLocatedBlock | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HADOOP-17138](https://issues.apache.org/jira/browse/HADOOP-17138) | Fix spotbugs warnings surfaced after upgrade to 4.0.6 | Minor | . | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-4771](https://issues.apache.org/jira/browse/YARN-4771) | Some containers can be skipped during log aggregation after NM restart | Major | nodemanager | Jason Darrell Lowe | Jim Brennan | +| [YARN-10367](https://issues.apache.org/jira/browse/YARN-10367) | Failed to get nodejs 10.21.0 when building docker image | Blocker | build, webapp | Akira Ajisaka | Akira Ajisaka | +| [MAPREDUCE-7051](https://issues.apache.org/jira/browse/MAPREDUCE-7051) | Fix typo in MultipleOutputFormat | Trivial | . | ywheel | ywheel | +| [HDFS-15313](https://issues.apache.org/jira/browse/HDFS-15313) | Ensure inodes in active filesystem are not deleted during snapshot delete | Major | snapshots | Shashikant Banerjee | Shashikant Banerjee | +| [HDFS-14950](https://issues.apache.org/jira/browse/HDFS-14950) | missing libhdfspp libs in dist-package | Major | build, libhdfs++ | Yuan Zhou | Yuan Zhou | +| [YARN-10359](https://issues.apache.org/jira/browse/YARN-10359) | Log container report only if list is not empty | Minor | . | Bilwa S T | Bilwa S T | +| [YARN-10229](https://issues.apache.org/jira/browse/YARN-10229) | [Federation] Client should be able to submit application to RM directly using normal client conf | Major | amrmproxy, federation | JohnsonGuo | Bilwa S T | +| [HDFS-15503](https://issues.apache.org/jira/browse/HDFS-15503) | File and directory permissions are not able to be modified from WebUI | Major | . 
| Hemanth Boyina | Hemanth Boyina | +| [HADOOP-17184](https://issues.apache.org/jira/browse/HADOOP-17184) | Add --mvn-custom-repos parameter to yetus calls | Major | build | Mingliang Liu | Mingliang Liu | +| [HDFS-15499](https://issues.apache.org/jira/browse/HDFS-15499) | Clean up httpfs/pom.xml to remove aws-java-sdk-s3 exclusion | Major | httpfs | Mingliang Liu | Mingliang Liu | +| [HADOOP-17186](https://issues.apache.org/jira/browse/HADOOP-17186) | Fixing javadoc in ListingOperationCallbacks | Major | build, documentation | Akira Ajisaka | Mukund Thakur | +| [HADOOP-17164](https://issues.apache.org/jira/browse/HADOOP-17164) | UGI loginUserFromKeytab doesn't set the last login time | Major | security | Sandeep Guggilam | Sandeep Guggilam | +| [YARN-4575](https://issues.apache.org/jira/browse/YARN-4575) | ApplicationResourceUsageReport should return ALL reserved resource | Major | . | Bibin Chundatt | Bibin Chundatt | +| [YARN-10388](https://issues.apache.org/jira/browse/YARN-10388) | RMNode updatedCapability flag not set while RecommissionNodeTransition | Major | resourcemanager | Pranjal Protim Borah | Pranjal Protim Borah | +| [HDFS-15443](https://issues.apache.org/jira/browse/HDFS-15443) | Setting dfs.datanode.max.transfer.threads to a very small value can cause strange failure. | Major | datanode | AMC-team | AMC-team | +| [HDFS-15508](https://issues.apache.org/jira/browse/HDFS-15508) | [JDK 11] Fix javadoc errors in hadoop-hdfs-rbf module | Major | documentation | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15506](https://issues.apache.org/jira/browse/HDFS-15506) | [JDK 11] Fix javadoc errors in hadoop-hdfs module | Major | documentation | Akira Ajisaka | Xieming Li | +| [HDFS-15507](https://issues.apache.org/jira/browse/HDFS-15507) | [JDK 11] Fix javadoc errors in hadoop-hdfs-client module | Major | documentation | Akira Ajisaka | Xieming Li | +| [HADOOP-17196](https://issues.apache.org/jira/browse/HADOOP-17196) | Fix C/C++ standard warnings | Major | build | Gautham Banasandra | Gautham Banasandra | +| [HADOOP-17204](https://issues.apache.org/jira/browse/HADOOP-17204) | Fix typo in Hadoop KMS document | Trivial | documentation, kms | Akira Ajisaka | Xieming Li | +| [HADOOP-17192](https://issues.apache.org/jira/browse/HADOOP-17192) | ITestS3AHugeFilesSSECDiskBlock failing because of bucket overrides | Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [YARN-10336](https://issues.apache.org/jira/browse/YARN-10336) | RM page should throw exception when command injected in RM REST API to get applications | Major | . | Rajshree Mishra | Bilwa S T | +| [HDFS-15439](https://issues.apache.org/jira/browse/HDFS-15439) | Setting dfs.mover.retry.max.attempts to negative value will retry forever. | Major | balancer & mover | AMC-team | AMC-team | +| [YARN-10391](https://issues.apache.org/jira/browse/YARN-10391) | --module-gpu functionality is broken in container-executor | Major | nodemanager | Eric Badger | Eric Badger | +| [HADOOP-17122](https://issues.apache.org/jira/browse/HADOOP-17122) | Bug in preserving Directory Attributes in DistCp with Atomic Copy | Major | tools/distcp | Swaminathan Balachandran | | +| [HDFS-14504](https://issues.apache.org/jira/browse/HDFS-14504) | Rename with Snapshots does not honor quota limit | Major | . 
| Shashikant Banerjee | Hemanth Boyina | +| [HADOOP-17209](https://issues.apache.org/jira/browse/HADOOP-17209) | Erasure Coding: Native library memory leak | Major | native | Sean Chow | Sean Chow | +| [HADOOP-16925](https://issues.apache.org/jira/browse/HADOOP-16925) | MetricsConfig incorrectly loads the configuration whose value is String list in the properties file | Major | metrics | Jiayi Liu | Jiayi Liu | +| [HADOOP-17220](https://issues.apache.org/jira/browse/HADOOP-17220) | Upgrade slf4j to 1.7.30 ( To Address: CVE-2018-8088) | Major | . | Brahma Reddy Battula | Brahma Reddy Battula | +| [HDFS-14852](https://issues.apache.org/jira/browse/HDFS-14852) | Removing from LowRedundancyBlocks does not remove the block from all queues | Major | namenode | Hui Fei | Hui Fei | +| [HDFS-15536](https://issues.apache.org/jira/browse/HDFS-15536) | RBF: Clear Quota in Router was not consistent | Critical | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15510](https://issues.apache.org/jira/browse/HDFS-15510) | RBF: Quota and Content Summary was not correct in Multiple Destinations | Critical | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-15540](https://issues.apache.org/jira/browse/HDFS-15540) | Directories protected from delete can still be moved to the trash | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-17129](https://issues.apache.org/jira/browse/HADOOP-17129) | Validating storage keys in ABFS correctly | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh | +| [HDFS-15471](https://issues.apache.org/jira/browse/HDFS-15471) | TestHDFSContractMultipartUploader fails on trunk | Major | test | Ahmed Hussein | Steve Loughran | +| [HDFS-15290](https://issues.apache.org/jira/browse/HDFS-15290) | NPE in HttpServer during NameNode startup | Major | namenode | Konstantin Shvachko | Simbarashe Dzinamarira | +| [HADOOP-17158](https://issues.apache.org/jira/browse/HADOOP-17158) | Test timeout for ITestAbfsInputStreamStatistics#testReadAheadCounters | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh | +| [HADOOP-17229](https://issues.apache.org/jira/browse/HADOOP-17229) | Test failure as failed request body counted in byte received metric - ITestAbfsNetworkStatistics#testAbfsHttpResponseStatistics | Major | fs/azure, test | Sneha Vijayarajan | Mehakmeet Singh | +| [YARN-10397](https://issues.apache.org/jira/browse/YARN-10397) | SchedulerRequest should be forwarded to scheduler if custom scheduler supports placement constraints | Minor | . | Bilwa S T | Bilwa S T | +| [HDFS-15573](https://issues.apache.org/jira/browse/HDFS-15573) | Only log warning if considerLoad and considerStorageType are both true | Major | . | Stephen O'Donnell | Stephen O'Donnell | +| [YARN-10430](https://issues.apache.org/jira/browse/YARN-10430) | Log improvements in NodeStatusUpdaterImpl | Minor | nodemanager | Bilwa S T | Bilwa S T | +| [HADOOP-17246](https://issues.apache.org/jira/browse/HADOOP-17246) | Fix build the hadoop-build Docker image failed | Major | build | Wanqiang Ji | Wanqiang Ji | +| [HDFS-15438](https://issues.apache.org/jira/browse/HDFS-15438) | Setting dfs.disk.balancer.max.disk.errors = 0 will fail the block copy | Major | balancer & mover | AMC-team | AMC-team | +| [HADOOP-15136](https://issues.apache.org/jira/browse/HADOOP-15136) | Typo in rename spec pseudocode | Major | documentation | Rae Marks | | +| [HADOOP-17088](https://issues.apache.org/jira/browse/HADOOP-17088) | Failed to load XInclude files with relative path. 
| Minor | conf | Yushi Hayasaka | Yushi Hayasaka | +| [MAPREDUCE-7294](https://issues.apache.org/jira/browse/MAPREDUCE-7294) | Only application master should upload resource to Yarn Shared Cache | Major | mrv2 | zhenzhao wang | zhenzhao wang | +| [HADOOP-17277](https://issues.apache.org/jira/browse/HADOOP-17277) | Correct spelling errors for separator | Trivial | common | Hui Fei | Hui Fei | +| [HADOOP-17286](https://issues.apache.org/jira/browse/HADOOP-17286) | Upgrade to jQuery 3.5.1 in hadoop-yarn-common | Major | . | Wei-Chiu Chuang | Aryan Gupta | +| [HDFS-15591](https://issues.apache.org/jira/browse/HDFS-15591) | RBF: Fix webHdfs file display error | Major | . | wangzhaohui | wangzhaohui | +| [MAPREDUCE-7289](https://issues.apache.org/jira/browse/MAPREDUCE-7289) | Fix wrong comment in LongLong.java | Trivial | documentation, examples | Akira Ajisaka | Wanqiang Ji | +| [YARN-9809](https://issues.apache.org/jira/browse/YARN-9809) | NMs should supply a health status when registering with RM | Major | . | Eric Badger | Eric Badger | +| [HADOOP-17300](https://issues.apache.org/jira/browse/HADOOP-17300) | FileSystem.DirListingIterator.next() call should return NoSuchElementException | Major | common, fs | Mukund Thakur | Mukund Thakur | +| [YARN-10393](https://issues.apache.org/jira/browse/YARN-10393) | MR job live lock caused by completed state container leak in heartbeat between node manager and RM | Major | nodemanager, yarn | zhenzhao wang | Jim Brennan | +| [HDFS-15253](https://issues.apache.org/jira/browse/HDFS-15253) | Set default throttle value on dfs.image.transfer.bandwidthPerSec | Major | namenode | Karthik Palanisamy | Karthik Palanisamy | +| [HDFS-15610](https://issues.apache.org/jira/browse/HDFS-15610) | Reduce datanode upgrade/hardlink thread | Major | datanode | Karthik Palanisamy | Karthik Palanisamy | +| [YARN-10455](https://issues.apache.org/jira/browse/YARN-10455) | TestNMProxy.testNMProxyRPCRetry is not consistent | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15456](https://issues.apache.org/jira/browse/HDFS-15456) | TestExternalStoragePolicySatisfier fails intermittently | Major | . | Ahmed Hussein | Leon Gao | +| [HADOOP-17223](https://issues.apache.org/jira/browse/HADOOP-17223) | update org.apache.httpcomponents:httpclient to 4.5.13 and httpcore to 4.4.13 | Blocker | . | Pranav Bheda | Pranav Bheda | +| [HDFS-15628](https://issues.apache.org/jira/browse/HDFS-15628) | HttpFS server throws NPE if a file is a symlink | Major | fs, httpfs | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15627](https://issues.apache.org/jira/browse/HDFS-15627) | Audit log deletes before collecting blocks | Major | logging, namenode | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17309](https://issues.apache.org/jira/browse/HADOOP-17309) | Javadoc warnings and errors are ignored in the precommit jobs | Major | build, documentation | Akira Ajisaka | Akira Ajisaka | +| [HDFS-14383](https://issues.apache.org/jira/browse/HDFS-14383) | Compute datanode load based on StoragePolicy | Major | hdfs, namenode | Karthik Palanisamy | Ayush Saxena | +| [HADOOP-17310](https://issues.apache.org/jira/browse/HADOOP-17310) | Touch command with -c option is broken | Major | . 
| Ayush Saxena | Ayush Saxena | +| [HDFS-15626](https://issues.apache.org/jira/browse/HDFS-15626) | TestWebHDFS.testLargeDirectory failing | Major | test, webhdfs | Mukund Thakur | Mukund Thakur | +| [HADOOP-17298](https://issues.apache.org/jira/browse/HADOOP-17298) | Backslash in username causes build failure in the environment started by start-build-env.sh. | Minor | build | Takeru Kuramoto | Takeru Kuramoto | +| [HDFS-15639](https://issues.apache.org/jira/browse/HDFS-15639) | [JDK 11] Fix Javadoc errors in hadoop-hdfs-client | Major | . | Takanobu Asanuma | Takanobu Asanuma | +| [HDFS-15622](https://issues.apache.org/jira/browse/HDFS-15622) | Deleted blocks linger in the replications queue | Major | hdfs | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17308](https://issues.apache.org/jira/browse/HADOOP-17308) | WASB : PageBlobOutputStream succeeding hflush even when underlying flush to storage failed | Critical | . | Anoop Sam John | Anoop Sam John | +| [HDFS-15641](https://issues.apache.org/jira/browse/HDFS-15641) | DataNode could meet deadlock if invoke refreshNameNode | Critical | . | Hongbing Wang | Hongbing Wang | +| [HADOOP-17328](https://issues.apache.org/jira/browse/HADOOP-17328) | LazyPersist Overwrite fails in direct write mode | Major | . | Ayush Saxena | Ayush Saxena | +| [MAPREDUCE-7302](https://issues.apache.org/jira/browse/MAPREDUCE-7302) | Upgrading to JUnit 4.13 causes testcase TestFetcher.testCorruptedIFile() to fail | Major | test | Peter Bacsko | Peter Bacsko | +| [HDFS-15644](https://issues.apache.org/jira/browse/HDFS-15644) | Failed volumes can cause DNs to stop block reporting | Major | block placement, datanode | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17236](https://issues.apache.org/jira/browse/HADOOP-17236) | Bump up snakeyaml to 1.26 to mitigate CVE-2017-18640 | Major | . | Brahma Reddy Battula | Brahma Reddy Battula | +| [YARN-10467](https://issues.apache.org/jira/browse/YARN-10467) | ContainerIdPBImpl objects can be leaked in RMNodeImpl.completedContainers | Major | resourcemanager | Haibo Chen | Haibo Chen | +| [HADOOP-17329](https://issues.apache.org/jira/browse/HADOOP-17329) | mvn site commands fails due to MetricsSystemImpl changes | Major | . | Xiaoqiao He | Xiaoqiao He | +| [HDFS-15651](https://issues.apache.org/jira/browse/HDFS-15651) | Client could not obtain block when DN CommandProcessingThread exit | Major | . | Yiqun Lin | Aiphago | +| [HADOOP-17340](https://issues.apache.org/jira/browse/HADOOP-17340) | TestLdapGroupsMapping failing -string mismatch in exception validation | Major | test | Steve Loughran | Steve Loughran | +| [HDFS-15667](https://issues.apache.org/jira/browse/HDFS-15667) | Audit log record the unexpected allowed result when delete called | Major | hdfs | Baolong Mao | Baolong Mao | +| [HADOOP-17352](https://issues.apache.org/jira/browse/HADOOP-17352) | Update PATCH\_NAMING\_RULE in the personality file | Minor | build | Akira Ajisaka | Akira Ajisaka | +| [YARN-10458](https://issues.apache.org/jira/browse/YARN-10458) | Hive On Tez queries fails upon submission to dynamically created pools | Major | resourcemanager | Anand Srinivasan | Peter Bacsko | +| [HDFS-15485](https://issues.apache.org/jira/browse/HDFS-15485) | Fix outdated properties of JournalNode when performing rollback | Minor | . 
| Deegue | Deegue | +| [HADOOP-17096](https://issues.apache.org/jira/browse/HADOOP-17096) | ZStandardCompressor throws java.lang.InternalError: Error (generic) | Major | io | Stephen Jung (Stripe) | Stephen Jung (Stripe) | +| [HADOOP-17327](https://issues.apache.org/jira/browse/HADOOP-17327) | NPE when starting MiniYARNCluster from hadoop-client-minicluster | Critical | . | Chao Sun | | +| [HADOOP-17324](https://issues.apache.org/jira/browse/HADOOP-17324) | Don't relocate org.bouncycastle in shaded client jars | Critical | . | Chao Sun | Chao Sun | +| [HADOOP-17373](https://issues.apache.org/jira/browse/HADOOP-17373) | hadoop-client-integration-tests doesn't work when building with skipShade | Major | . | Chao Sun | Chao Sun | +| [HADOOP-17365](https://issues.apache.org/jira/browse/HADOOP-17365) | Contract test for renaming over existing file is too lenient | Minor | test | Attila Doroszlai | Attila Doroszlai | +| [HADOOP-17358](https://issues.apache.org/jira/browse/HADOOP-17358) | Improve excessive reloading of Configurations | Major | conf | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15538](https://issues.apache.org/jira/browse/HDFS-15538) | Fix the documentation for dfs.namenode.replication.max-streams in hdfs-default.xml | Major | . | Xieming Li | Xieming Li | +| [HADOOP-17362](https://issues.apache.org/jira/browse/HADOOP-17362) | Doing hadoop ls on Har file triggers too many RPC calls | Major | fs | Ahmed Hussein | Ahmed Hussein | +| [YARN-10485](https://issues.apache.org/jira/browse/YARN-10485) | TimelineConnector swallows InterruptedException | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17360](https://issues.apache.org/jira/browse/HADOOP-17360) | Log the remote address for authentication success | Minor | ipc | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15685](https://issues.apache.org/jira/browse/HDFS-15685) | [JDK 14] TestConfiguredFailoverProxyProvider#testResolveDomainNameUsingDNS fails | Major | . | Akira Ajisaka | Akira Ajisaka | +| [MAPREDUCE-7305](https://issues.apache.org/jira/browse/MAPREDUCE-7305) | [JDK 11] TestMRJobsWithProfiler fails | Major | test | Akira Ajisaka | Akira Ajisaka | +| [YARN-10396](https://issues.apache.org/jira/browse/YARN-10396) | Max applications calculation per queue disregards queue level settings in absolute mode | Major | capacity scheduler | Benjamin Teke | Benjamin Teke | +| [HADOOP-17390](https://issues.apache.org/jira/browse/HADOOP-17390) | Skip license check on lz4 code files | Major | build | Zhihua Deng | Zhihua Deng | +| [MAPREDUCE-7307](https://issues.apache.org/jira/browse/MAPREDUCE-7307) | Potential thread leak in LocatedFileStatusFetcher | Major | job submission | Zhihua Deng | Zhihua Deng | +| [HADOOP-17346](https://issues.apache.org/jira/browse/HADOOP-17346) | Fair call queue is defeated by abusive service principals | Major | common, ipc | Ahmed Hussein | Ahmed Hussein | +| [YARN-10470](https://issues.apache.org/jira/browse/YARN-10470) | When building new web ui with root user, the bower install should support it. 
| Major | build, yarn-ui-v2 | Qi Zhu | Qi Zhu | +| [HADOOP-17398](https://issues.apache.org/jira/browse/HADOOP-17398) | Skipping network I/O in S3A getFileStatus(/) breaks some tests | Major | fs/s3, test | Mukund Thakur | Mukund Thakur | +| [HDFS-15698](https://issues.apache.org/jira/browse/HDFS-15698) | Fix the typo of dfshealth.html after HDFS-15358 | Trivial | namenode | Hui Fei | Hui Fei | +| [YARN-10498](https://issues.apache.org/jira/browse/YARN-10498) | Fix Yarn CapacityScheduler Markdown document | Trivial | documentation | zhaoshengjie | zhaoshengjie | +| [HADOOP-17399](https://issues.apache.org/jira/browse/HADOOP-17399) | lz4 sources missing for native Visual Studio project | Major | native | Gautham Banasandra | Gautham Banasandra | +| [HDFS-15695](https://issues.apache.org/jira/browse/HDFS-15695) | NN should not let the balancer run in safemode | Major | namenode | Ahmed Hussein | Ahmed Hussein | +| [YARN-10511](https://issues.apache.org/jira/browse/YARN-10511) | Update yarn.nodemanager.env-whitelist value in docs | Minor | documentation | Andrea Scarpino | Andrea Scarpino | +| [HADOOP-16080](https://issues.apache.org/jira/browse/HADOOP-16080) | hadoop-aws does not work with hadoop-client-api | Major | fs/s3 | Keith Turner | Chao Sun | +| [HDFS-15660](https://issues.apache.org/jira/browse/HDFS-15660) | StorageTypeProto is not compatiable between 3.x and 2.6 | Major | . | Ryan Wu | Ryan Wu | +| [HDFS-15707](https://issues.apache.org/jira/browse/HDFS-15707) | NNTop counts don't add up as expected | Major | hdfs, metrics, namenode | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15709](https://issues.apache.org/jira/browse/HDFS-15709) | EC: Socket file descriptor leak in StripedBlockChecksumReconstructor | Major | datanode, ec, erasure-coding | Yushi Hayasaka | Yushi Hayasaka | +| [YARN-10495](https://issues.apache.org/jira/browse/YARN-10495) | make the rpath of container-executor configurable | Major | yarn | angerszhu | angerszhu | +| [HDFS-15240](https://issues.apache.org/jira/browse/HDFS-15240) | Erasure Coding: dirty buffer causes reconstruction block error | Blocker | datanode, erasure-coding | HuangTao | HuangTao | +| [YARN-10491](https://issues.apache.org/jira/browse/YARN-10491) | Fix deprecation warnings in SLSWebApp.java | Minor | build | Akira Ajisaka | Ankit Kumar | +| [HADOOP-13571](https://issues.apache.org/jira/browse/HADOOP-13571) | ServerSocketUtil.getPort() should use loopback address, not 0.0.0.0 | Major | . | Eric Badger | Eric Badger | +| [HDFS-15725](https://issues.apache.org/jira/browse/HDFS-15725) | Lease Recovery never completes for a committed block which the DNs never finalize | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-15170](https://issues.apache.org/jira/browse/HDFS-15170) | EC: Block gets marked as CORRUPT in case of failover and pipeline recovery | Critical | erasure-coding | Ayush Saxena | Ayush Saxena | +| [YARN-10536](https://issues.apache.org/jira/browse/YARN-10536) | Client in distributedShell swallows interrupt exceptions | Major | client, distributed-shell | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15743](https://issues.apache.org/jira/browse/HDFS-15743) | Fix -Pdist build failure of hadoop-hdfs-native-client | Major | . 
| Masatake Iwasaki | Masatake Iwasaki | +| [YARN-10334](https://issues.apache.org/jira/browse/YARN-10334) | TestDistributedShell leaks resources on timeout/failure | Major | distributed-shell, test, yarn | Ahmed Hussein | Ahmed Hussein | +| [YARN-10558](https://issues.apache.org/jira/browse/YARN-10558) | Fix failure of TestDistributedShell#testDSShellWithOpportunisticContainers | Minor | test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15719](https://issues.apache.org/jira/browse/HDFS-15719) | [Hadoop 3] Both NameNodes can crash simultaneously due to the short JN socket timeout | Critical | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [YARN-10560](https://issues.apache.org/jira/browse/YARN-10560) | Upgrade node.js to 10.23.1 and yarn to 1.22.5 in Web UI v2 | Major | webapp, yarn-ui-v2 | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17444](https://issues.apache.org/jira/browse/HADOOP-17444) | ADLFS: Update SDK version from 2.3.6 to 2.3.9 | Minor | fs/adl | Bilahari T H | Bilahari T H | +| [YARN-10528](https://issues.apache.org/jira/browse/YARN-10528) | maxAMShare should only be accepted for leaf queues, not parent queues | Major | . | Siddharth Ahuja | Siddharth Ahuja | +| [HADOOP-17438](https://issues.apache.org/jira/browse/HADOOP-17438) | Increase docker memory limit in Jenkins | Major | build, scripts, test, yetus | Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7310](https://issues.apache.org/jira/browse/MAPREDUCE-7310) | Clear the fileMap in JHEventHandlerForSigtermTest | Minor | test | Zhengxi Li | Zhengxi Li | +| [HADOOP-16947](https://issues.apache.org/jira/browse/HADOOP-16947) | Stale record should be remove when MutableRollingAverages generating aggregate data. | Major | . | Haibin Huang | Haibin Huang | +| [YARN-10515](https://issues.apache.org/jira/browse/YARN-10515) | Fix flaky test TestCapacitySchedulerAutoQueueCreation.testDynamicAutoQueueCreationWithTags | Major | test | Peter Bacsko | Peter Bacsko | +| [HADOOP-17224](https://issues.apache.org/jira/browse/HADOOP-17224) | Install Intel ISA-L library in Dockerfile | Blocker | . | Takanobu Asanuma | Takanobu Asanuma | +| [HDFS-15632](https://issues.apache.org/jira/browse/HDFS-15632) | AbstractContractDeleteTest should set recursive parameter to true for recursive test cases. | Major | . | Konstantin Shvachko | Anton Kutuzov | +| [HADOOP-17258](https://issues.apache.org/jira/browse/HADOOP-17258) | MagicS3GuardCommitter fails with \`pendingset\` already exists | Major | fs/s3 | Dongjoon Hyun | Dongjoon Hyun | +| [HDFS-15661](https://issues.apache.org/jira/browse/HDFS-15661) | The DeadNodeDetector shouldn't be shared by different DFSClients. | Major | . 
| Jinglun | Jinglun | +| [HDFS-10498](https://issues.apache.org/jira/browse/HDFS-10498) | Intermittent test failure org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotFileLength.testSnapshotfileLength | Major | hdfs, snapshots | Hanisha Koneru | Jim Brennan | +| [HADOOP-17506](https://issues.apache.org/jira/browse/HADOOP-17506) | Fix typo in BUILDING.txt | Trivial | documentation | Gautham Banasandra | Gautham Banasandra | +| [HDFS-15791](https://issues.apache.org/jira/browse/HDFS-15791) | Possible Resource Leak in FSImageFormatProtobuf | Major | namenode | Narges Shadab | Narges Shadab | +| [HDFS-15795](https://issues.apache.org/jira/browse/HDFS-15795) | EC: Wrong checksum when reconstruction was failed by exception | Major | datanode, ec, erasure-coding | Yushi Hayasaka | Yushi Hayasaka | +| [HDFS-15779](https://issues.apache.org/jira/browse/HDFS-15779) | EC: fix NPE caused by StripedWriter.clearBuffers during reconstruct block | Major | . | Hongbing Wang | Hongbing Wang | +| [HADOOP-17217](https://issues.apache.org/jira/browse/HADOOP-17217) | S3A FileSystem does not correctly delete directories with fake entries | Major | fs/s3 | Kaya Kupferschmidt | | +| [HDFS-15798](https://issues.apache.org/jira/browse/HDFS-15798) | EC: Reconstruct task failed, and It would be XmitsInProgress of DN has negative number | Major | . | Haiyang Hu | Haiyang Hu | +| [YARN-10607](https://issues.apache.org/jira/browse/YARN-10607) | User environment is unable to prepend PATH when mapreduce.admin.user.env also sets PATH | Major | . | Eric Badger | Eric Badger | +| [HDFS-15792](https://issues.apache.org/jira/browse/HDFS-15792) | ClasscastException while loading FSImage | Major | nn | Renukaprasad C | Renukaprasad C | +| [HADOOP-17516](https://issues.apache.org/jira/browse/HADOOP-17516) | Upgrade ant to 1.10.9 | Major | . | Akira Ajisaka | Akira Ajisaka | +| [YARN-10500](https://issues.apache.org/jira/browse/YARN-10500) | TestDelegationTokenRenewer fails intermittently | Major | test | Akira Ajisaka | Masatake Iwasaki | +| [HDFS-15806](https://issues.apache.org/jira/browse/HDFS-15806) | DeadNodeDetector should close all the threads when it is closed. | Major | . | Jinglun | Jinglun | +| [HADOOP-17534](https://issues.apache.org/jira/browse/HADOOP-17534) | Upgrade Jackson databind to 2.10.5.1 | Major | build | Adam Roberts | Akira Ajisaka | +| [MAPREDUCE-7323](https://issues.apache.org/jira/browse/MAPREDUCE-7323) | Remove job\_history\_summary.py | Major | . | Akira Ajisaka | Akira Ajisaka | +| [YARN-10647](https://issues.apache.org/jira/browse/YARN-10647) | Fix TestRMNodeLabelsManager failed after YARN-10501. | Major | . | Qi Zhu | Qi Zhu | +| [HADOOP-17528](https://issues.apache.org/jira/browse/HADOOP-17528) | Not closing an SFTP File System instance prevents JVM from exiting. | Major | . | Mikhail Pryakhin | Mikhail Pryakhin | +| [HADOOP-17510](https://issues.apache.org/jira/browse/HADOOP-17510) | Hadoop prints sensitive Cookie information. | Major | . | Renukaprasad C | Renukaprasad C | +| [HDFS-15422](https://issues.apache.org/jira/browse/HDFS-15422) | Reported IBR is partially replaced with stored info when queuing. | Critical | namenode | Kihwal Lee | Stephen O'Donnell | +| [YARN-10651](https://issues.apache.org/jira/browse/YARN-10651) | CapacityScheduler crashed with NPE in AbstractYarnScheduler.updateNodeResource() | Major | . | Haibo Chen | Haibo Chen | +| [MAPREDUCE-7320](https://issues.apache.org/jira/browse/MAPREDUCE-7320) | ClusterMapReduceTestCase does not clean directories | Major | . 
| Ahmed Hussein | Ahmed Hussein | +| [HDFS-14013](https://issues.apache.org/jira/browse/HDFS-14013) | Skip any credentials stored in HDFS when starting ZKFC | Major | hdfs | Krzysztof Adamski | Stephen O'Donnell | +| [HDFS-15849](https://issues.apache.org/jira/browse/HDFS-15849) | ExpiredHeartbeats metric should be of Type.COUNTER | Major | metrics | Konstantin Shvachko | Qi Zhu | +| [YARN-10649](https://issues.apache.org/jira/browse/YARN-10649) | Fix RMNodeImpl.updateExistContainers leak | Major | resourcemanager | Max Xie | Max Xie | +| [YARN-10672](https://issues.apache.org/jira/browse/YARN-10672) | All testcases in TestReservations are flaky | Major | . | Szilard Nemeth | Szilard Nemeth | +| [HADOOP-17557](https://issues.apache.org/jira/browse/HADOOP-17557) | skip-dir option is not processed by Yetus | Major | build, precommit, yetus | Ahmed Hussein | Ahmed Hussein | +| [YARN-10671](https://issues.apache.org/jira/browse/YARN-10671) | Fix Typo in TestSchedulingRequestContainerAllocation | Minor | . | D M Murali Krishna Reddy | D M Murali Krishna Reddy | +| [HDFS-15875](https://issues.apache.org/jira/browse/HDFS-15875) | Check whether file is being truncated before truncate | Major | . | Hui Fei | Hui Fei | +| [HADOOP-17582](https://issues.apache.org/jira/browse/HADOOP-17582) | Replace GitHub App Token with GitHub OAuth token | Major | build | Akira Ajisaka | Akira Ajisaka | +| [YARN-10687](https://issues.apache.org/jira/browse/YARN-10687) | Add option to disable/enable free disk space checking and percentage checking for full and not-full disks | Major | nodemanager | Qi Zhu | Qi Zhu | +| [HADOOP-17586](https://issues.apache.org/jira/browse/HADOOP-17586) | Upgrade org.codehaus.woodstox:stax2-api to 4.2.1 | Major | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-17585](https://issues.apache.org/jira/browse/HADOOP-17585) | Correct timestamp format in the docs for the touch command | Major | . | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-15809](https://issues.apache.org/jira/browse/HDFS-15809) | DeadNodeDetector doesn't remove live nodes from dead node set. | Major | . | Jinglun | Jinglun | +| [HADOOP-17532](https://issues.apache.org/jira/browse/HADOOP-17532) | Yarn Job execution get failed when LZ4 Compression Codec is used | Major | common | Bhavik Patel | Bhavik Patel | +| [YARN-10588](https://issues.apache.org/jira/browse/YARN-10588) | Percentage of queue and cluster is zero in WebUI | Major | . | Bilwa S T | Bilwa S T | +| [MAPREDUCE-7322](https://issues.apache.org/jira/browse/MAPREDUCE-7322) | revisiting TestMRIntermediateDataEncryption | Major | job submission, security, test | Ahmed Hussein | Ahmed Hussein | +| [YARN-10703](https://issues.apache.org/jira/browse/YARN-10703) | Fix potential null pointer error of gpuNodeResourceUpdateHandler in NodeResourceMonitorImpl. | Major | . | Qi Zhu | Qi Zhu | +| [HDFS-15868](https://issues.apache.org/jira/browse/HDFS-15868) | Possible Resource Leak in EditLogFileOutputStream | Major | . 
| Narges Shadab | Narges Shadab | +| [HADOOP-17592](https://issues.apache.org/jira/browse/HADOOP-17592) | Fix the wrong CIDR range example in Proxy User documentation | Minor | documentation | Kwangsun Noh | Kwangsun Noh | +| [YARN-10706](https://issues.apache.org/jira/browse/YARN-10706) | Upgrade com.github.eirslett:frontend-maven-plugin to 1.11.2 | Major | buid | Mingliang Liu | Mingliang Liu | +| [MAPREDUCE-7325](https://issues.apache.org/jira/browse/MAPREDUCE-7325) | Intermediate data encryption is broken in LocalJobRunner | Major | job submission, security | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15908](https://issues.apache.org/jira/browse/HDFS-15908) | Possible Resource Leak in org.apache.hadoop.hdfs.qjournal.server.Journal | Major | . | Narges Shadab | Narges Shadab | +| [HDFS-15910](https://issues.apache.org/jira/browse/HDFS-15910) | Replace bzero with explicit\_bzero for better safety | Critical | libhdfs++ | Gautham Banasandra | Gautham Banasandra | +| [YARN-10697](https://issues.apache.org/jira/browse/YARN-10697) | Resources are displayed in bytes in UI for schedulers other than capacity | Major | . | Bilwa S T | Bilwa S T | +| [HADOOP-17602](https://issues.apache.org/jira/browse/HADOOP-17602) | Upgrade JUnit to 4.13.1 | Major | build, security, test | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15900](https://issues.apache.org/jira/browse/HDFS-15900) | RBF: empty blockpool id on dfsrouter caused by UNAVAILABLE NameNode | Major | rbf | Harunobu Daikoku | Harunobu Daikoku | +| [YARN-10501](https://issues.apache.org/jira/browse/YARN-10501) | Can't remove all node labels after add node label without nodemanager port | Critical | yarn | caozhiqiang | caozhiqiang | +| [YARN-10437](https://issues.apache.org/jira/browse/YARN-10437) | Destroy yarn service if any YarnException occurs during submitApp | Minor | yarn-native-services | D M Murali Krishna Reddy | D M Murali Krishna Reddy | +| [YARN-10439](https://issues.apache.org/jira/browse/YARN-10439) | Yarn Service AM listens on all IP's on the machine | Minor | security, yarn-native-services | D M Murali Krishna Reddy | D M Murali Krishna Reddy | +| [YARN-10441](https://issues.apache.org/jira/browse/YARN-10441) | Add support for hadoop.http.rmwebapp.scheduler.page.class | Major | scheduler | D M Murali Krishna Reddy | D M Murali Krishna Reddy | +| [YARN-10466](https://issues.apache.org/jira/browse/YARN-10466) | Fix NullPointerException in yarn-services Component.java | Minor | . | D M Murali Krishna Reddy | D M Murali Krishna Reddy | +| [YARN-10716](https://issues.apache.org/jira/browse/YARN-10716) | Fix typo in ContainerRuntime | Trivial | documentation | Wanqiang Ji | xishuhai | +| [HDFS-15494](https://issues.apache.org/jira/browse/HDFS-15494) | TestReplicaCachingGetSpaceUsed#testReplicaCachingGetSpaceUsedByRBWReplica Fails on Windows | Major | . | Ravuri Sushma sree | Ravuri Sushma sree | +| [HADOOP-17610](https://issues.apache.org/jira/browse/HADOOP-17610) | DelegationTokenAuthenticator prints token information | Major | . | Ravuri Sushma sree | Ravuri Sushma sree | +| [HADOOP-17587](https://issues.apache.org/jira/browse/HADOOP-17587) | Kinit with keytab should not display the keytab file's full path in any logs | Major | . 
| Ravuri Sushma sree | Ravuri Sushma sree | +| [HDFS-15950](https://issues.apache.org/jira/browse/HDFS-15950) | Remove unused hdfs.proto import | Major | hdfs-client | Gautham Banasandra | Gautham Banasandra | +| [HDFS-15940](https://issues.apache.org/jira/browse/HDFS-15940) | Some tests in TestBlockRecovery are consistently failing | Major | . | Viraj Jasani | Viraj Jasani | +| [HDFS-15949](https://issues.apache.org/jira/browse/HDFS-15949) | Fix integer overflow | Major | libhdfs++ | Gautham Banasandra | Gautham Banasandra | +| [HADOOP-17621](https://issues.apache.org/jira/browse/HADOOP-17621) | hadoop-auth to remove jetty-server dependency | Major | auth | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-15948](https://issues.apache.org/jira/browse/HDFS-15948) | Fix test4tests for libhdfspp | Critical | build, libhdfs++ | Gautham Banasandra | Gautham Banasandra | +| [HADOOP-17617](https://issues.apache.org/jira/browse/HADOOP-17617) | Incorrect representation of RESPONSE for Get Key Version in KMS index.md.vm file | Major | . | Ravuri Sushma sree | Ravuri Sushma sree | +| [MAPREDUCE-7329](https://issues.apache.org/jira/browse/MAPREDUCE-7329) | HadoopPipes task may fail when linux kernel version change from 3.x to 4.x | Major | pipes | chaoli | chaoli | +| [HADOOP-17608](https://issues.apache.org/jira/browse/HADOOP-17608) | Fix TestKMS failure | Major | kms | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15963](https://issues.apache.org/jira/browse/HDFS-15963) | Unreleased volume references cause an infinite loop | Major | datanode | Shuyan Zhang | Shuyan Zhang | +| [YARN-10460](https://issues.apache.org/jira/browse/YARN-10460) | Upgrading to JUnit 4.13 causes tests in TestNodeStatusUpdater to fail | Major | nodemanager, test | Peter Bacsko | Peter Bacsko | +| [HADOOP-17641](https://issues.apache.org/jira/browse/HADOOP-17641) | ITestWasbUriAndConfiguration.testCanonicalServiceName() failing now mockaccount exists | Minor | fs/azure, test | Steve Loughran | Steve Loughran | +| [HDFS-15974](https://issues.apache.org/jira/browse/HDFS-15974) | RBF: Unable to display the datanode UI of the router | Major | rbf, ui | Xiangyi Zhu | Xiangyi Zhu | +| [HADOOP-17655](https://issues.apache.org/jira/browse/HADOOP-17655) | Upgrade Jetty to 9.4.40 | Blocker | . | Akira Ajisaka | Akira Ajisaka | +| [YARN-10749](https://issues.apache.org/jira/browse/YARN-10749) | Can't remove all node labels after add node label without nodemanager port, broken by YARN-10647 | Major | . | D M Murali Krishna Reddy | D M Murali Krishna Reddy | +| [HDFS-15566](https://issues.apache.org/jira/browse/HDFS-15566) | NN restart fails after RollingUpgrade from 3.1.3/3.2.1 to 3.3.0 | Blocker | . | Brahma Reddy Battula | Brahma Reddy Battula | +| [HDFS-15621](https://issues.apache.org/jira/browse/HDFS-15621) | Datanode DirectoryScanner uses excessive memory | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [YARN-10752](https://issues.apache.org/jira/browse/YARN-10752) | Shaded guava not found when compiling with profile hbase2.0 | Blocker | timelineserver | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-15865](https://issues.apache.org/jira/browse/HDFS-15865) | Interrupt DataStreamer thread | Minor | datanode | Karthik Palanisamy | | +| [HDFS-15810](https://issues.apache.org/jira/browse/HDFS-15810) | RBF: RBFMetrics's TotalCapacity out of bounds | Major | . | Xiaoxing Wei | Fengnan Li | +| [HADOOP-17657](https://issues.apache.org/jira/browse/HADOOP-17657) | SequeneFile.Writer should implement StreamCapabilities | Major | . 
| Kishen Das | Kishen Das | +| [YARN-10756](https://issues.apache.org/jira/browse/YARN-10756) | Remove additional junit 4.11 dependency from javadoc | Major | build, test, timelineservice | ANANDA G B | Akira Ajisaka | +| [HADOOP-17375](https://issues.apache.org/jira/browse/HADOOP-17375) | Fix the error of TestDynamometerInfra | Major | test | Akira Ajisaka | Takanobu Asanuma | +| [HDFS-16001](https://issues.apache.org/jira/browse/HDFS-16001) | TestOfflineEditsViewer.testStored() fails reading negative value of FSEditLogOpCodes | Blocker | hdfs | Konstantin Shvachko | Akira Ajisaka | +| [HADOOP-17142](https://issues.apache.org/jira/browse/HADOOP-17142) | Fix outdated properties of journal node when perform rollback | Minor | . | Deegue | | +| [HADOOP-17107](https://issues.apache.org/jira/browse/HADOOP-17107) | hadoop-azure parallel tests not working on recent JDKs | Major | build, fs/azure | Steve Loughran | Steve Loughran | +| [YARN-10555](https://issues.apache.org/jira/browse/YARN-10555) | Missing access check before getAppAttempts | Critical | webapp | lujie | lujie | +| [HADOOP-17703](https://issues.apache.org/jira/browse/HADOOP-17703) | checkcompatibility.py errors out when specifying annotations | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-16027](https://issues.apache.org/jira/browse/HDFS-16027) | HDFS-15245 breaks source code compatibility between 3.3.0 and 3.3.1. | Blocker | journal-node, ui | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [YARN-10725](https://issues.apache.org/jira/browse/YARN-10725) | Backport YARN-10120 to branch-3.3 | Major | . | Bilwa S T | Bilwa S T | +| [YARN-10701](https://issues.apache.org/jira/browse/YARN-10701) | The yarn.resource-types should support multi types without trimmed. | Major | . | Qi Zhu | Qi Zhu | +| [HADOOP-17718](https://issues.apache.org/jira/browse/HADOOP-17718) | Explicitly set locale in the Dockerfile | Blocker | build | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [MAPREDUCE-7348](https://issues.apache.org/jira/browse/MAPREDUCE-7348) | TestFrameworkUploader#testNativeIO fails | Major | test | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17723](https://issues.apache.org/jira/browse/HADOOP-17723) | [build] fix the Dockerfile for ARM | Blocker | build | Wei-Chiu Chuang | Wei-Chiu Chuang | + + +### TESTS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [MAPREDUCE-7288](https://issues.apache.org/jira/browse/MAPREDUCE-7288) | Fix TestLongLong#testRightShift | Minor | . | Wanqiang Ji | Wanqiang Ji | +| [HDFS-15514](https://issues.apache.org/jira/browse/HDFS-15514) | Remove useless dfs.webhdfs.enabled | Minor | test | Hui Fei | Hui Fei | +| [HADOOP-17205](https://issues.apache.org/jira/browse/HADOOP-17205) | Move personality file from Yetus to Hadoop repository | Major | test, yetus | Chao Sun | Chao Sun | +| [HDFS-15564](https://issues.apache.org/jira/browse/HDFS-15564) | Add Test annotation for TestPersistBlocks#testRestartDfsWithSync | Minor | hdfs | Hui Fei | Hui Fei | +| [YARN-9333](https://issues.apache.org/jira/browse/YARN-9333) | TestFairSchedulerPreemption.testRelaxLocalityPreemptionWithNoLessAMInRemainingNodes fails intermittently | Major | yarn | Prabhu Joseph | Peter Bacsko | +| [HDFS-15690](https://issues.apache.org/jira/browse/HDFS-15690) | Add lz4-java as hadoop-hdfs test dependency | Major | . | L. C. Hsieh | L. C. 
Hsieh | +| [HADOOP-17459](https://issues.apache.org/jira/browse/HADOOP-17459) | ADL Gen1: Fix the test case failures which are failing after the contract test update in hadoop-common | Minor | fs/adl | Bilahari T H | Bilahari T H | +| [HDFS-15898](https://issues.apache.org/jira/browse/HDFS-15898) | Test case TestOfflineImageViewer fails | Minor | . | Hui Fei | Hui Fei | + + +### SUB-TASKS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-16857](https://issues.apache.org/jira/browse/HADOOP-16857) | ABFS: Optimize HttpRequest retry triggers | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17002](https://issues.apache.org/jira/browse/HADOOP-17002) | ABFS: Avoid storage calls to check if the account is HNS enabled or not | Minor | fs/azure | Bilahari T H | Bilahari T H | +| [YARN-10215](https://issues.apache.org/jira/browse/YARN-10215) | Endpoint for obtaining direct URL for the logs | Major | yarn | Adam Antal | Andras Gyori | +| [HDFS-14353](https://issues.apache.org/jira/browse/HDFS-14353) | Erasure Coding: metrics xmitsInProgress become to negative. | Major | datanode, erasure-coding | Baolong Mao | Baolong Mao | +| [HDFS-15305](https://issues.apache.org/jira/browse/HDFS-15305) | Extend ViewFS and provide ViewFSOverloadScheme implementation with scheme configurable. | Major | fs, hadoop-client, hdfs-client, viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [YARN-10259](https://issues.apache.org/jira/browse/YARN-10259) | Reserved Containers not allocated from available space of other nodes in CandidateNodeSet in MultiNodePlacement | Major | capacityscheduler | Prabhu Joseph | Prabhu Joseph | +| [HDFS-15306](https://issues.apache.org/jira/browse/HDFS-15306) | Make mount-table to read from central place ( Let's say from HDFS) | Major | configuration, hadoop-client | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-16756](https://issues.apache.org/jira/browse/HADOOP-16756) | distcp -update to S3A; abfs, etc always overwrites due to block size mismatch | Major | fs/s3, tools/distcp | Daisuke Kobayashi | Steve Loughran | +| [HDFS-15322](https://issues.apache.org/jira/browse/HDFS-15322) | Make NflyFS to work when ViewFsOverloadScheme's scheme and target uris schemes are same. | Major | fs, nflyFs, viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [YARN-10108](https://issues.apache.org/jira/browse/YARN-10108) | FS-CS converter: nestedUserQueue with default rule results in invalid queue mapping | Major | . 
| Prabhu Joseph | Gergely Pollák | +| [HADOOP-16852](https://issues.apache.org/jira/browse/HADOOP-16852) | ABFS: Send error back to client for Read Ahead request failure | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17053](https://issues.apache.org/jira/browse/HADOOP-17053) | ABFS: FS initialize fails for incompatible account-agnostic Token Provider setting | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HDFS-15321](https://issues.apache.org/jira/browse/HDFS-15321) | Make DFSAdmin tool to work with ViewFSOverloadScheme | Major | dfsadmin, fs, viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-16568](https://issues.apache.org/jira/browse/HADOOP-16568) | S3A FullCredentialsTokenBinding fails if local credentials are unset | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [YARN-10284](https://issues.apache.org/jira/browse/YARN-10284) | Add lazy initialization of LogAggregationFileControllerFactory in LogServlet | Major | log-aggregation, yarn | Adam Antal | Adam Antal | +| [HDFS-15330](https://issues.apache.org/jira/browse/HDFS-15330) | Document the ViewFSOverloadScheme details in ViewFS guide | Major | viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15389](https://issues.apache.org/jira/browse/HDFS-15389) | DFSAdmin should close filesystem and dfsadmin -setBalancerBandwidth should work with ViewFSOverloadScheme | Major | dfsadmin, viewfsOverloadScheme | Ayush Saxena | Ayush Saxena | +| [HDFS-15394](https://issues.apache.org/jira/browse/HDFS-15394) | Add all available fs.viewfs.overload.scheme.target.\.impl classes in core-default.xml bydefault. | Major | configuration, viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15387](https://issues.apache.org/jira/browse/HDFS-15387) | FSUsage$DF should consider ViewFSOverloadScheme in processPath | Minor | viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [YARN-10292](https://issues.apache.org/jira/browse/YARN-10292) | FS-CS converter: add an option to enable asynchronous scheduling in CapacityScheduler | Major | fairscheduler | Benjamin Teke | Benjamin Teke | +| [HADOOP-17004](https://issues.apache.org/jira/browse/HADOOP-17004) | ABFS: Improve the ABFS driver documentation | Minor | fs/azure | Bilahari T H | Bilahari T H | +| [HDFS-15418](https://issues.apache.org/jira/browse/HDFS-15418) | ViewFileSystemOverloadScheme should represent mount links as non symlinks | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [YARN-9930](https://issues.apache.org/jira/browse/YARN-9930) | Support max running app logic for CapacityScheduler | Major | capacity scheduler, capacityscheduler | zhoukang | Peter Bacsko | +| [HDFS-15427](https://issues.apache.org/jira/browse/HDFS-15427) | Merged ListStatus with Fallback target filesystem and InternalDirViewFS. | Major | viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [YARN-10316](https://issues.apache.org/jira/browse/YARN-10316) | FS-CS converter: convert maxAppsDefault, maxRunningApps settings | Major | . 
| Peter Bacsko | Peter Bacsko | +| [HADOOP-17054](https://issues.apache.org/jira/browse/HADOOP-17054) | ABFS: Fix idempotency test failures when SharedKey is set as AuthType | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17050](https://issues.apache.org/jira/browse/HADOOP-17050) | S3A to support additional token issuers | Minor | fs/s3 | Gabor Bota | Steve Loughran | +| [HADOOP-17015](https://issues.apache.org/jira/browse/HADOOP-17015) | ABFS: Make PUT and POST operations idempotent | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HDFS-15429](https://issues.apache.org/jira/browse/HDFS-15429) | mkdirs should work when parent dir is internalDir and fallback configured. | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15436](https://issues.apache.org/jira/browse/HDFS-15436) | Default mount table name used by ViewFileSystem should be configurable | Major | viewfs, viewfsOverloadScheme | Virajith Jalaparti | Virajith Jalaparti | +| [HADOOP-16798](https://issues.apache.org/jira/browse/HADOOP-16798) | job commit failure in S3A MR magic committer test | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [YARN-10325](https://issues.apache.org/jira/browse/YARN-10325) | Document max-parallel-apps for Capacity Scheduler | Major | capacity scheduler, capacityscheduler | Peter Bacsko | Peter Bacsko | +| [HADOOP-16961](https://issues.apache.org/jira/browse/HADOOP-16961) | ABFS: Adding metrics to AbfsInputStream (AbfsInputStreamStatistics) | Major | fs/azure | Gabor Bota | Mehakmeet Singh | +| [HADOOP-17086](https://issues.apache.org/jira/browse/HADOOP-17086) | ABFS: Fix the parsing errors in ABFS Driver with creation Time (being returned in ListPath) | Major | fs/azure | Ishani | Bilahari T H | +| [HDFS-15430](https://issues.apache.org/jira/browse/HDFS-15430) | create should work when parent dir is internalDir and fallback configured. | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15450](https://issues.apache.org/jira/browse/HDFS-15450) | Fix NN trash emptier to work if ViewFSOveroadScheme enabled | Major | namenode, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17111](https://issues.apache.org/jira/browse/HADOOP-17111) | Replace Guava Optional with Java8+ Optional | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15449](https://issues.apache.org/jira/browse/HDFS-15449) | Optionally ignore port number in mount-table name when picking from initialized uri | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17058](https://issues.apache.org/jira/browse/HADOOP-17058) | Support for Appendblob in abfs driver | Major | fs/azure | Ishani | Ishani | +| [HDFS-15462](https://issues.apache.org/jira/browse/HDFS-15462) | Add fs.viewfs.overload.scheme.target.ofs.impl to core-default.xml | Major | configuration, viewfs, viewfsOverloadScheme | Siyao Meng | Siyao Meng | +| [HDFS-15464](https://issues.apache.org/jira/browse/HDFS-15464) | ViewFsOverloadScheme should work when -fs option pointing to remote cluster without mount links | Major | viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17105](https://issues.apache.org/jira/browse/HADOOP-17105) | S3AFS globStatus attempts to resolve symlinks | Minor | fs/s3 | Jimmy Zuber | Jimmy Zuber | +| [HADOOP-17022](https://issues.apache.org/jira/browse/HADOOP-17022) | Tune S3A listFiles() api. 
| Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [HADOOP-17101](https://issues.apache.org/jira/browse/HADOOP-17101) | Replace Guava Function with Java8+ Function | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17099](https://issues.apache.org/jira/browse/HADOOP-17099) | Replace Guava Predicate with Java8+ Predicate | Minor | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-16682](https://issues.apache.org/jira/browse/HADOOP-16682) | Remove unnecessary ABFS toString() invocations | Minor | fs/azure | Jeetesh Mangwani | Bilahari T H | +| [HADOOP-17136](https://issues.apache.org/jira/browse/HADOOP-17136) | ITestS3ADirectoryPerformance.testListOperations failing | Minor | fs/s3, test | Mukund Thakur | Mukund Thakur | +| [HDFS-15478](https://issues.apache.org/jira/browse/HDFS-15478) | When Empty mount points, we are assigning fallback link to self. But it should not use full URI for target fs. | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17100](https://issues.apache.org/jira/browse/HADOOP-17100) | Replace Guava Supplier with Java8+ Supplier in Hadoop | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17132](https://issues.apache.org/jira/browse/HADOOP-17132) | ABFS: Fix For Idempotency code | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17092](https://issues.apache.org/jira/browse/HADOOP-17092) | ABFS: Long waits and unintended retries when multiple threads try to fetch token using ClientCreds | Major | fs/azure | Sneha Vijayarajan | Bilahari T H | +| [HADOOP-17131](https://issues.apache.org/jira/browse/HADOOP-17131) | Refactor S3A Listing code for better isolation | Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [HADOOP-17137](https://issues.apache.org/jira/browse/HADOOP-17137) | ABFS: Tests ITestAbfsNetworkStatistics need to be config setting agnostic | Minor | fs/azure, test | Sneha Vijayarajan | Bilahari T H | +| [HADOOP-17149](https://issues.apache.org/jira/browse/HADOOP-17149) | ABFS: Test failure: testFailedRequestWhenCredentialsNotCorrect fails when run with SharedKey | Minor | fs/azure | Sneha Vijayarajan | Bilahari T H | +| [HADOOP-17163](https://issues.apache.org/jira/browse/HADOOP-17163) | ABFS: Add debug log for rename failures | Major | fs/azure | Bilahari T H | Bilahari T H | +| [HDFS-15515](https://issues.apache.org/jira/browse/HDFS-15515) | mkdirs on fallback should throw IOE out instead of suppressing and returning false | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-13230](https://issues.apache.org/jira/browse/HADOOP-13230) | S3A to optionally retain directory markers | Major | fs/s3 | Aaron Fabbri | Steve Loughran | +| [HADOOP-14124](https://issues.apache.org/jira/browse/HADOOP-14124) | S3AFileSystem silently deletes "fake" directories when writing a file. | Minor | fs, fs/s3 | Joel Baranick | | +| [HADOOP-16966](https://issues.apache.org/jira/browse/HADOOP-16966) | ABFS: Upgrade Store REST API Version to 2019-12-12 | Major | fs/azure | Ishani | Sneha Vijayarajan | +| [HDFS-15533](https://issues.apache.org/jira/browse/HDFS-15533) | Provide DFS API compatible class(ViewDistributedFileSystem), but use ViewFileSystemOverloadScheme inside | Major | dfs, viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17074](https://issues.apache.org/jira/browse/HADOOP-17074) | Optimise s3a Listing to be fully asynchronous. 
| Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [HDFS-15529](https://issues.apache.org/jira/browse/HDFS-15529) | getChildFilesystems should include fallback fs as well | Critical | viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17167](https://issues.apache.org/jira/browse/HADOOP-17167) | ITestS3AEncryptionWithDefaultS3Settings fails if default bucket encryption != KMS | Minor | fs/s3 | Steve Loughran | Mukund Thakur | +| [HADOOP-17227](https://issues.apache.org/jira/browse/HADOOP-17227) | improve s3guard markers command line tool | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [YARN-10332](https://issues.apache.org/jira/browse/YARN-10332) | RESOURCE\_UPDATE event was repeatedly registered in DECOMMISSIONING state | Minor | resourcemanager | yehuanhuan | yehuanhuan | +| [HDFS-15558](https://issues.apache.org/jira/browse/HDFS-15558) | ViewDistributedFileSystem#recoverLease should call super.recoverLease when there are no mounts configured | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17181](https://issues.apache.org/jira/browse/HADOOP-17181) | Handle transient stream read failures in FileSystem contract tests | Minor | fs/s3 | Steve Loughran | | +| [HDFS-15551](https://issues.apache.org/jira/browse/HDFS-15551) | Tiny Improve for DeadNode detector | Minor | hdfs-client | dark\_num | imbajin | +| [HDFS-15555](https://issues.apache.org/jira/browse/HDFS-15555) | RBF: Refresh cacheNS when SocketException occurs | Major | rbf | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15532](https://issues.apache.org/jira/browse/HDFS-15532) | listFiles on root/InternalDir will fail if fallback root has file | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15578](https://issues.apache.org/jira/browse/HDFS-15578) | Fix the rename issues with fallback fs enabled | Major | viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15585](https://issues.apache.org/jira/browse/HDFS-15585) | ViewDFS#getDelegationToken should not throw UnsupportedOperationException. | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17215](https://issues.apache.org/jira/browse/HADOOP-17215) | ABFS: Support for conditional overwrite | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HDFS-14811](https://issues.apache.org/jira/browse/HDFS-14811) | RBF: TestRouterRpc#testErasureCoding is flaky | Major | rbf | Chen Zhang | Chen Zhang | +| [HADOOP-17023](https://issues.apache.org/jira/browse/HADOOP-17023) | Tune listStatus() api of s3a. 
| Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [HADOOP-17279](https://issues.apache.org/jira/browse/HADOOP-17279) | ABFS: Test testNegativeScenariosForCreateOverwriteDisabled fails for non-HNS account | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17183](https://issues.apache.org/jira/browse/HADOOP-17183) | ABFS: Enable checkaccess API | Major | fs/azure | Bilahari T H | Bilahari T H | +| [HDFS-15613](https://issues.apache.org/jira/browse/HDFS-15613) | RBF: Router FSCK fails after HDFS-14442 | Major | rbf | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17281](https://issues.apache.org/jira/browse/HADOOP-17281) | Implement FileSystem.listStatusIterator() in S3AFileSystem | Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [HADOOP-17293](https://issues.apache.org/jira/browse/HADOOP-17293) | S3A to always probe S3 in S3A getFileStatus on non-auth paths | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HDFS-15620](https://issues.apache.org/jira/browse/HDFS-15620) | RBF: Fix test failures after HADOOP-17281 | Major | rbf, test | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17261](https://issues.apache.org/jira/browse/HADOOP-17261) | s3a rename() now requires s3:deleteObjectVersion permission | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-16915](https://issues.apache.org/jira/browse/HADOOP-16915) | ABFS: Test failure ITestAzureBlobFileSystemRandomRead.testRandomReadPerformance | Major | . | Bilahari T H | Bilahari T H | +| [HADOOP-17166](https://issues.apache.org/jira/browse/HADOOP-17166) | ABFS: configure output stream thread pool | Minor | fs/azure | Bilahari T H | Bilahari T H | +| [HADOOP-17301](https://issues.apache.org/jira/browse/HADOOP-17301) | ABFS: read-ahead error reporting breaks buffer management | Critical | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17288](https://issues.apache.org/jira/browse/HADOOP-17288) | Use shaded guava from thirdparty | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15459](https://issues.apache.org/jira/browse/HDFS-15459) | TestBlockTokenWithDFSStriped fails intermittently | Major | hdfs | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15461](https://issues.apache.org/jira/browse/HDFS-15461) | TestDFSClientRetries#testGetFileChecksum fails intermittently | Major | dfsclient, test | Ahmed Hussein | Ahmed Hussein | +| [HDFS-9776](https://issues.apache.org/jira/browse/HDFS-9776) | TestHAAppend#testMultipleAppendsDuringCatchupTailing is flaky | Major | . | Vinayakumar B | Ahmed Hussein | +| [HDFS-15657](https://issues.apache.org/jira/browse/HDFS-15657) | RBF: TestRouter#testNamenodeHeartBeatEnableDefault fails by BindException | Major | rbf, test | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17305](https://issues.apache.org/jira/browse/HADOOP-17305) | ITestCustomSigner fails with gcs s3 compatible endpoint. | Major | fs/s3 | Mukund Thakur | Mukund Thakur | +| [HDFS-15643](https://issues.apache.org/jira/browse/HDFS-15643) | EC: Fix checksum computation in case of native encoders | Blocker | . | Ahmed Hussein | Ayush Saxena | +| [HADOOP-17344](https://issues.apache.org/jira/browse/HADOOP-17344) | Harmonize guava version and shade guava in yarn-csi | Major | . 
| Wei-Chiu Chuang | Akira Ajisaka | +| [HADOOP-17376](https://issues.apache.org/jira/browse/HADOOP-17376) | ITestS3AContractRename failing against stricter tests | Major | fs/s3, test | Steve Loughran | Attila Doroszlai | +| [HADOOP-17379](https://issues.apache.org/jira/browse/HADOOP-17379) | AbstractS3ATokenIdentifier to set issue date == now | Major | fs/s3 | Steve Loughran | Jungtaek Lim | +| [HADOOP-17244](https://issues.apache.org/jira/browse/HADOOP-17244) | HADOOP-17244. S3A directory delete tombstones dir markers prematurely. | Blocker | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17388](https://issues.apache.org/jira/browse/HADOOP-17388) | AbstractS3ATokenIdentifier to issue date in UTC | Major | . | Steve Loughran | Jungtaek Lim | +| [HADOOP-17343](https://issues.apache.org/jira/browse/HADOOP-17343) | Upgrade aws-java-sdk to 1.11.901 | Minor | build, fs/s3 | Dongjoon Hyun | Steve Loughran | +| [HADOOP-17325](https://issues.apache.org/jira/browse/HADOOP-17325) | WASB: Test failures | Major | fs/azure, test | Sneha Vijayarajan | Steve Loughran | +| [HADOOP-17323](https://issues.apache.org/jira/browse/HADOOP-17323) | s3a getFileStatus("/") to skip IO | Minor | fs/s3 | Steve Loughran | Mukund Thakur | +| [HADOOP-17311](https://issues.apache.org/jira/browse/HADOOP-17311) | ABFS: Logs should redact SAS signature | Major | fs/azure, security | Sneha Vijayarajan | Bilahari T H | +| [HADOOP-17313](https://issues.apache.org/jira/browse/HADOOP-17313) | FileSystem.get to support slow-to-instantiate FS clients | Major | fs, fs/azure, fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17394](https://issues.apache.org/jira/browse/HADOOP-17394) | [JDK 11] mvn package -Pdocs fails | Major | build, documentation | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17396](https://issues.apache.org/jira/browse/HADOOP-17396) | ABFS: testRenameFileOverExistingFile Fails after Contract test update | Major | fs/azure, test | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17318](https://issues.apache.org/jira/browse/HADOOP-17318) | S3A committer to support concurrent jobs with same app attempt ID & dest dir | Minor | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17385](https://issues.apache.org/jira/browse/HADOOP-17385) | ITestS3ADeleteCost.testDirMarkersFileCreation failure | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-15844](https://issues.apache.org/jira/browse/HADOOP-15844) | tag S3GuardTool entry points as limitedPrivate("management-tools")/evolving | Minor | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17332](https://issues.apache.org/jira/browse/HADOOP-17332) | S3A marker tool mixes up -min and -max | Trivial | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17397](https://issues.apache.org/jira/browse/HADOOP-17397) | ABFS: SAS Test updates for version and permission update | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HDFS-15708](https://issues.apache.org/jira/browse/HDFS-15708) | TestURLConnectionFactory fails by NoClassDefFoundError in branch-3.3 and branch-3.2 | Blocker | test | Akira Ajisaka | Chao Sun | +| [HDFS-15716](https://issues.apache.org/jira/browse/HDFS-15716) | TestUpgradeDomainBlockPlacementPolicy flaky | Major | namenode, test | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17422](https://issues.apache.org/jira/browse/HADOOP-17422) | ABFS: Set default ListMaxResults to max server limit | Major | fs/azure | Sumangala Patki | Thomas Marqardt | +| [HADOOP-17450](https://issues.apache.org/jira/browse/HADOOP-17450) | 
hadoop-common to add IOStatistics API | Major | fs | Steve Loughran | Steve Loughran | +| [HADOOP-17271](https://issues.apache.org/jira/browse/HADOOP-17271) | S3A statistics to support IOStatistics | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17347](https://issues.apache.org/jira/browse/HADOOP-17347) | ABFS: Optimise read for small files/tails of files | Major | fs/azure | Bilahari T H | Bilahari T H | +| [HADOOP-17272](https://issues.apache.org/jira/browse/HADOOP-17272) | ABFS Streams to support IOStatistics API | Major | fs/azure | Steve Loughran | Mehakmeet Singh | +| [HADOOP-17451](https://issues.apache.org/jira/browse/HADOOP-17451) | Intermittent failure of S3A tests which make assertions on statistics/IOStatistics | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HDFS-15762](https://issues.apache.org/jira/browse/HDFS-15762) | TestMultipleNNPortQOP#testMultipleNNPortOverwriteDownStream fails intermittently | Minor | . | Toshihiko Uchida | Toshihiko Uchida | +| [HDFS-15672](https://issues.apache.org/jira/browse/HDFS-15672) | TestBalancerWithMultipleNameNodes#testBalancingBlockpoolsWithBlockPoolPolicy fails on trunk | Major | . | Ahmed Hussein | Masatake Iwasaki | +| [HADOOP-13845](https://issues.apache.org/jira/browse/HADOOP-13845) | s3a to instrument duration of HTTP calls | Minor | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17456](https://issues.apache.org/jira/browse/HADOOP-17456) | S3A ITestPartialRenamesDeletes.testPartialDirDelete[bulk-delete=true] failure | Minor | fs/s3, test | Steve Loughran | Steve Loughran | +| [HADOOP-17455](https://issues.apache.org/jira/browse/HADOOP-17455) | [s3a] Intermittent failure of ITestS3ADeleteCost.testDeleteSingleFileInDir | Major | fs/s3, test | Gabor Bota | Steve Loughran | +| [HADOOP-17433](https://issues.apache.org/jira/browse/HADOOP-17433) | Skipping network I/O in S3A getFileStatus(/) breaks ITestAssumeRole | Minor | fs/s3, test | Steve Loughran | Steve Loughran | +| [HADOOP-17296](https://issues.apache.org/jira/browse/HADOOP-17296) | ABFS: Allow Random Reads to be of Buffer Size | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17413](https://issues.apache.org/jira/browse/HADOOP-17413) | ABFS: Release Elastic ByteBuffer pool memory at outputStream close | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17407](https://issues.apache.org/jira/browse/HADOOP-17407) | ABFS: Delete Idempotency handling can lead to NPE | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17404](https://issues.apache.org/jira/browse/HADOOP-17404) | ABFS: Piggyback flush on Append calls for short writes | Major | fs/azure | Sneha Vijayarajan | Sneha Vijayarajan | +| [HADOOP-17480](https://issues.apache.org/jira/browse/HADOOP-17480) | S3A docs to state s3 is consistent, deprecate S3Guard | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17414](https://issues.apache.org/jira/browse/HADOOP-17414) | Magic committer files don't have the count of bytes written collected by spark | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17493](https://issues.apache.org/jira/browse/HADOOP-17493) | renaming S3A Statistic DELEGATION\_TOKENS\_ISSUED to DELEGATION\_TOKEN\_ISSUED broke tests downstream | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17483](https://issues.apache.org/jira/browse/HADOOP-17483) | magic committer to be enabled for all S3 buckets | Major | fs/s3 | Steve Loughran | Steve Loughran | +| 
[HADOOP-17337](https://issues.apache.org/jira/browse/HADOOP-17337) | S3A NetworkBinding has a runtime class dependency on a third-party shaded class | Blocker | fs/s3 | Chris Wensel | Steve Loughran | +| [HADOOP-17475](https://issues.apache.org/jira/browse/HADOOP-17475) | ABFS : add high performance listStatusIterator | Major | fs/azure | Bilahari T H | Bilahari T H | +| [HADOOP-17432](https://issues.apache.org/jira/browse/HADOOP-17432) | [JDK 16] KerberosUtil#getOidInstance is broken by JEP 396 | Major | auth | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-13327](https://issues.apache.org/jira/browse/HADOOP-13327) | Add OutputStream + Syncable to the Filesystem Specification | Major | fs | Steve Loughran | Steve Loughran | +| [HDFS-15836](https://issues.apache.org/jira/browse/HDFS-15836) | RBF: Fix contract tests after HADOOP-13327 | Major | rbf | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17038](https://issues.apache.org/jira/browse/HADOOP-17038) | Support disabling buffered reads in ABFS positional reads | Major | . | Anoop Sam John | Anoop Sam John | +| [HADOOP-15710](https://issues.apache.org/jira/browse/HADOOP-15710) | ABFS checkException to map 403 to AccessDeniedException | Blocker | fs/azure | Steve Loughran | Steve Loughran | +| [HDFS-15847](https://issues.apache.org/jira/browse/HDFS-15847) | create client protocol: add ecPolicyName & storagePolicy param to debug statement string | Minor | . | Bhavik Patel | Bhavik Patel | +| [HADOOP-16748](https://issues.apache.org/jira/browse/HADOOP-16748) | Migrate to Python 3 and upgrade Yetus to 0.13.0 | Major | . | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-16906](https://issues.apache.org/jira/browse/HADOOP-16906) | Add some Abortable.abort() interface for streams etc which can be terminated | Blocker | fs, fs/azure, fs/s3 | Steve Loughran | Jungtaek Lim | +| [HADOOP-17567](https://issues.apache.org/jira/browse/HADOOP-17567) | typo in MagicCommitTracker | Trivial | fs/s3 | Pierrick HYMBERT | Pierrick HYMBERT | +| [HADOOP-17191](https://issues.apache.org/jira/browse/HADOOP-17191) | ABFS: Run the integration tests with various combinations of configurations and publish consolidated results | Minor | fs/azure, test | Bilahari T H | Bilahari T H | +| [HADOOP-16721](https://issues.apache.org/jira/browse/HADOOP-16721) | Improve S3A rename resilience | Blocker | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17548](https://issues.apache.org/jira/browse/HADOOP-17548) | ABFS: Toggle Store Mkdirs request overwrite parameter | Major | fs/azure | Sumangala Patki | Sumangala Patki | +| [HADOOP-17537](https://issues.apache.org/jira/browse/HADOOP-17537) | Correct abfs test assertion reversed in HADOOP-13327 | Major | fs/azure, test | Sumangala Patki | Sumangala Patki | +| [HDFS-15890](https://issues.apache.org/jira/browse/HDFS-15890) | Improve the Logs for File Concat Operation | Minor | namenode | Bhavik Patel | Bhavik Patel | +| [HDFS-13975](https://issues.apache.org/jira/browse/HDFS-13975) | TestBalancer#testMaxIterationTime fails sporadically | Major | . | Jason Darrell Lowe | Toshihiko Uchida | +| [YARN-10688](https://issues.apache.org/jira/browse/YARN-10688) | ClusterMetrics should support GPU capacity related metrics. | Major | metrics, resourcemanager | Qi Zhu | Qi Zhu | +| [YARN-10692](https://issues.apache.org/jira/browse/YARN-10692) | Add Node GPU Utilization and apply to NodeMetrics. | Major | . 
| Qi Zhu | Qi Zhu | +| [HDFS-15902](https://issues.apache.org/jira/browse/HDFS-15902) | Improve the log for HTTPFS server operation | Minor | httpfs | Bhavik Patel | Bhavik Patel | +| [HADOOP-17476](https://issues.apache.org/jira/browse/HADOOP-17476) | ITestAssumeRole.testAssumeRoleBadInnerAuth failure | Major | fs/s3, test | Steve Loughran | Steve Loughran | +| [HADOOP-13551](https://issues.apache.org/jira/browse/HADOOP-13551) | Collect AwsSdkMetrics in S3A FileSystem IOStatistics | Blocker | fs/s3 | Steve Loughran | Steve Loughran | +| [YARN-10713](https://issues.apache.org/jira/browse/YARN-10713) | ClusterMetrics should support custom resource capacity related metrics. | Major | . | Qi Zhu | Qi Zhu | +| [HDFS-15921](https://issues.apache.org/jira/browse/HDFS-15921) | Improve the log for the Storage Policy Operations | Minor | namenode | Bhavik Patel | Bhavik Patel | +| [YARN-10702](https://issues.apache.org/jira/browse/YARN-10702) | Add cluster metric for amount of CPU used by RM Event Processor | Minor | yarn | Jim Brennan | Jim Brennan | +| [YARN-10503](https://issues.apache.org/jira/browse/YARN-10503) | Support queue capacity in terms of absolute resources with custom resourceType. | Critical | . | Qi Zhu | Qi Zhu | +| [HADOOP-17630](https://issues.apache.org/jira/browse/HADOOP-17630) | [JDK 15] TestPrintableString fails due to Unicode 13.0 support | Major | . | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17576](https://issues.apache.org/jira/browse/HADOOP-17576) | ABFS: Disable throttling update for auth failures | Major | fs/azure | Sumangala Patki | Sumangala Patki | +| [YARN-10723](https://issues.apache.org/jira/browse/YARN-10723) | Change CS nodes page in UI to support custom resource. | Major | . | Qi Zhu | Qi Zhu | +| [HADOOP-16948](https://issues.apache.org/jira/browse/HADOOP-16948) | ABFS: Support infinite lease dirs | Minor | . | Billie Rinaldi | Billie Rinaldi | +| [HADOOP-17471](https://issues.apache.org/jira/browse/HADOOP-17471) | ABFS to collect IOStatistics | Major | fs/azure | Steve Loughran | Mehakmeet Singh | +| [HADOOP-17535](https://issues.apache.org/jira/browse/HADOOP-17535) | ABFS: ITestAzureBlobFileSystemCheckAccess test failure if test doesn't have oauth keys | Major | fs/azure | Steve Loughran | Steve Loughran | +| [HADOOP-17112](https://issues.apache.org/jira/browse/HADOOP-17112) | whitespace not allowed in paths when saving files to s3a via committer | Blocker | fs/s3 | Krzysztof Adamski | Krzysztof Adamski | +| [HADOOP-17597](https://issues.apache.org/jira/browse/HADOOP-17597) | Add option to downgrade S3A rejection of Syncable to warning | Minor | . | Steve Loughran | Steve Loughran | +| [HADOOP-17661](https://issues.apache.org/jira/browse/HADOOP-17661) | mvn versions:set fails to parse pom.xml | Blocker | build | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HADOOP-17536](https://issues.apache.org/jira/browse/HADOOP-17536) | ABFS: Suport for customer provided encryption key | Minor | fs/azure | Bilahari T H | Bilahari T H | +| [YARN-10707](https://issues.apache.org/jira/browse/YARN-10707) | Support custom resources in ResourceUtilization, and update Node GPU Utilization to use. | Major | yarn | Qi Zhu | Qi Zhu | +| [HADOOP-17653](https://issues.apache.org/jira/browse/HADOOP-17653) | Do not use guava's Files.createTempDir() | Major | . 
| Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-15952](https://issues.apache.org/jira/browse/HDFS-15952) | TestRouterRpcMultiDestination#testProxyGetTransactionID and testProxyVersionRequest are flaky | Major | rbf | Harunobu Daikoku | Akira Ajisaka | +| [HADOOP-16742](https://issues.apache.org/jira/browse/HADOOP-16742) | Possible NPE in S3A MultiObjectDeleteSupport error handling | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HADOOP-17644](https://issues.apache.org/jira/browse/HADOOP-17644) | Add back the exceptions removed by HADOOP-17432 for compatibility | Blocker | bulid | Akira Ajisaka | Quan Li | +| [YARN-10642](https://issues.apache.org/jira/browse/YARN-10642) | Race condition: AsyncDispatcher can get stuck by the changes introduced in YARN-8995 | Critical | resourcemanager | zhengchenyu | zhengchenyu | +| [YARN-9615](https://issues.apache.org/jira/browse/YARN-9615) | Add dispatcher metrics to RM | Major | . | Jonathan Hung | Qi Zhu | +| [HDFS-13934](https://issues.apache.org/jira/browse/HDFS-13934) | Multipart uploaders to be created through API call to FileSystem/FileContext, not service loader | Major | fs, fs/s3, hdfs | Steve Loughran | Steve Loughran | +| [HADOOP-17665](https://issues.apache.org/jira/browse/HADOOP-17665) | Ignore missing keystore configuration in reloading mechanism | Major | . | Borislav Iordanov | Borislav Iordanov | +| [HADOOP-17663](https://issues.apache.org/jira/browse/HADOOP-17663) | Remove useless property hadoop.assemblies.version in pom file | Trivial | build | Wei-Chiu Chuang | Akira Ajisaka | +| [HADOOP-17666](https://issues.apache.org/jira/browse/HADOOP-17666) | Update LICENSE for 3.3.1 | Blocker | . | Wei-Chiu Chuang | Wei-Chiu Chuang | + + +### OTHER: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-17430](https://issues.apache.org/jira/browse/HADOOP-17430) | Restore ability to set Text to empty byte array | Minor | common | gaozhan ding | gaozhan ding | +| [HDFS-15870](https://issues.apache.org/jira/browse/HDFS-15870) | Remove unused configuration dfs.namenode.stripe.min | Minor | . | tomscut | tomscut | +| [HDFS-15808](https://issues.apache.org/jira/browse/HDFS-15808) | Add metrics for FSNamesystem read/write lock hold long time | Major | hdfs | tomscut | tomscut | +| [HDFS-15873](https://issues.apache.org/jira/browse/HDFS-15873) | Add namenode address in logs for block report | Minor | datanode, hdfs | tomscut | tomscut | +| [HDFS-15906](https://issues.apache.org/jira/browse/HDFS-15906) | Close FSImage and FSNamesystem after formatting is complete | Minor | . | tomscut | tomscut | +| [HDFS-15892](https://issues.apache.org/jira/browse/HDFS-15892) | Add metric for editPendingQ in FSEditLogAsync | Minor | . | tomscut | tomscut | +| [HDFS-15951](https://issues.apache.org/jira/browse/HDFS-15951) | Remove unused parameters in NameNodeProxiesClient | Minor | . | tomscut | tomscut | +| [HDFS-15975](https://issues.apache.org/jira/browse/HDFS-15975) | Use LongAdder instead of AtomicLong | Minor | . | tomscut | tomscut | +| [HDFS-15970](https://issues.apache.org/jira/browse/HDFS-15970) | Print network topology on the web | Minor | . | tomscut | tomscut | +| [HDFS-15991](https://issues.apache.org/jira/browse/HDFS-15991) | Add location into datanode info for NameNodeMXBean | Minor | . | tomscut | tomscut | +| [HADOOP-17055](https://issues.apache.org/jira/browse/HADOOP-17055) | Remove residual code of Ozone | Major | . 
| Wanqiang Ji | Wanqiang Ji | +| [YARN-10274](https://issues.apache.org/jira/browse/YARN-10274) | Merge QueueMapping and QueueMappingEntity | Major | yarn | Gergely Pollák | Gergely Pollák | +| [YARN-10281](https://issues.apache.org/jira/browse/YARN-10281) | Redundant QueuePath usage in UserGroupMappingPlacementRule and AppNameMappingPlacementRule | Major | . | Gergely Pollák | Gergely Pollák | +| [YARN-10279](https://issues.apache.org/jira/browse/YARN-10279) | Avoid unnecessary QueueMappingEntity creations | Minor | . | Gergely Pollák | Hudáky Márton Gyula | +| [YARN-10277](https://issues.apache.org/jira/browse/YARN-10277) | CapacityScheduler test TestUserGroupMappingPlacementRule should build proper hierarchy | Major | . | Gergely Pollák | Szilard Nemeth | +| [HADOOP-16990](https://issues.apache.org/jira/browse/HADOOP-16990) | Update Mockserver | Major | . | Wei-Chiu Chuang | Attila Doroszlai | +| [YARN-10278](https://issues.apache.org/jira/browse/YARN-10278) | CapacityScheduler test framework ProportionalCapacityPreemptionPolicyMockFramework need some review | Major | . | Gergely Pollák | Szilard Nemeth | +| [YARN-10540](https://issues.apache.org/jira/browse/YARN-10540) | Node page is broken in YARN UI1 and UI2 including RMWebService api for nodes | Critical | webapp | Sunil G | Jim Brennan | +| [HADOOP-17445](https://issues.apache.org/jira/browse/HADOOP-17445) | Update the year to 2021 | Major | . | Xiaoqiao He | Xiaoqiao He | +| [HDFS-15731](https://issues.apache.org/jira/browse/HDFS-15731) | Reduce threadCount for unit tests to reduce the memory usage | Major | build, test | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17571](https://issues.apache.org/jira/browse/HADOOP-17571) | Upgrade com.fasterxml.woodstox:woodstox-core for security reasons | Major | . | Viraj Jasani | Viraj Jasani | +| [HDFS-15895](https://issues.apache.org/jira/browse/HDFS-15895) | DFSAdmin#printOpenFiles has redundant String#format usage | Minor | . | Viraj Jasani | Viraj Jasani | +| [HDFS-15926](https://issues.apache.org/jira/browse/HDFS-15926) | Removed duplicate dependency of hadoop-annotations | Minor | . | Viraj Jasani | Viraj Jasani | +| [HADOOP-17614](https://issues.apache.org/jira/browse/HADOOP-17614) | Bump netty to the latest 4.1.61 | Blocker | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HADOOP-17622](https://issues.apache.org/jira/browse/HADOOP-17622) | Avoid usage of deprecated IOUtils#cleanup API | Minor | . | Viraj Jasani | Viraj Jasani | +| [HADOOP-17624](https://issues.apache.org/jira/browse/HADOOP-17624) | Remove any rocksdb exclusion code | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HADOOP-17625](https://issues.apache.org/jira/browse/HADOOP-17625) | Update to Jetty 9.4.39 | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-15989](https://issues.apache.org/jira/browse/HDFS-15989) | Split TestBalancer into two classes | Major | . | Viraj Jasani | Viraj Jasani | +| [HADOOP-17676](https://issues.apache.org/jira/browse/HADOOP-17676) | Restrict imports from org.apache.curator.shaded | Major | . | Viraj Jasani | Viraj Jasani | +| [HADOOP-17683](https://issues.apache.org/jira/browse/HADOOP-17683) | Update commons-io to 2.8.0 | Major | . | Wei-Chiu Chuang | Akira Ajisaka | +| [HADOOP-17426](https://issues.apache.org/jira/browse/HADOOP-17426) | Upgrade to hadoop-thirdparty-1.1.0 | Major | . | Ayush Saxena | Wei-Chiu Chuang | +| [HADOOP-17739](https://issues.apache.org/jira/browse/HADOOP-17739) | Use hadoop-thirdparty 1.1.1 | Major | . 
| Wei-Chiu Chuang | Wei-Chiu Chuang | + + diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.1/RELEASENOTES.3.3.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.1/RELEASENOTES.3.3.1.md new file mode 100644 index 0000000000000..238dd5764693b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.1/RELEASENOTES.3.3.1.md @@ -0,0 +1,293 @@ + + +# Apache Hadoop 3.3.1 Release Notes + +These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements. + + +--- + +* [HADOOP-16054](https://issues.apache.org/jira/browse/HADOOP-16054) | *Major* | **Update Dockerfile to use Bionic** + +The build image has been upgraded to Bionic. + + +--- + +* [HDFS-15281](https://issues.apache.org/jira/browse/HDFS-15281) | *Major* | **ZKFC ignores dfs.namenode.rpc-bind-host and uses dfs.namenode.rpc-address to bind to host address** + +ZKFC binds host address to "dfs.namenode.servicerpc-bind-host", if configured. Otherwise, it binds to "dfs.namenode.rpc-bind-host". If neither of those is configured, ZKFC binds itself to NameNode RPC server address (effectively "dfs.namenode.rpc-address"). + + +--- + +* [HADOOP-16916](https://issues.apache.org/jira/browse/HADOOP-16916) | *Minor* | **ABFS: Delegation SAS generator for integration with Ranger** + +Azure ABFS support for Shared Access Signatures (SAS) + + +--- + +* [HADOOP-17044](https://issues.apache.org/jira/browse/HADOOP-17044) | *Major* | **Revert "HADOOP-8143. Change distcp to have -pb on by default"** + +Distcp block size is not preserved by default, unless -pb is specified. This restores the behavior prior to Hadoop 3. + + +--- + +* [HADOOP-17024](https://issues.apache.org/jira/browse/HADOOP-17024) | *Major* | **ListStatus on ViewFS root (ls "/") should list the linkFallBack root (configured target root).** + +ViewFS#listStatus on root("/") considers listing from fallbackLink if available. If the same directory name is present in configured mount path as well as in fallback link, then only the configured mount path will be listed in the returned result. + + +--- + +* [HDFS-13183](https://issues.apache.org/jira/browse/HDFS-13183) | *Major* | **Standby NameNode process getBlocks request to reduce Active load** + +Enable balancer to redirect getBlocks request to a Standby Namenode, thus reducing the performance impact of balancer to the Active NameNode. + +The feature is disabled by default. To enable it, configure the hdfs-site.xml of balancer: +dfs.ha.allow.stale.reads = true. + + +--- + +* [HADOOP-17076](https://issues.apache.org/jira/browse/HADOOP-17076) | *Minor* | **ABFS: Delegation SAS Generator Updates** + +Azure Blob File System (ABFS) SAS Generator Update + + +--- + +* [HADOOP-17089](https://issues.apache.org/jira/browse/HADOOP-17089) | *Critical* | **WASB: Update azure-storage-java SDK** + +Azure WASB bug fix that can cause list results to appear empty. + + +--- + +* [HADOOP-17105](https://issues.apache.org/jira/browse/HADOOP-17105) | *Minor* | **S3AFS globStatus attempts to resolve symlinks** + +Remove unnecessary symlink resolution in S3AFileSystem globStatus + + +--- + +* [HADOOP-13230](https://issues.apache.org/jira/browse/HADOOP-13230) | *Major* | **S3A to optionally retain directory markers** + +The S3A connector now has an option to stop deleting directory markers as files are written. This eliminates the IO throttling the operations can cause, and avoids creating tombstone markers on versioned S3 buckets. 
+ +This feature is incompatible with all versions of Hadoop which lack the HADOOP-17199 change to list and getFileStatus calls. + +Consult the S3A documentation for further details. + + +--- + +* [HADOOP-17215](https://issues.apache.org/jira/browse/HADOOP-17215) | *Major* | **ABFS: Support for conditional overwrite** + +ABFS: Support for conditional overwrite. + + +--- + +* [YARN-9809](https://issues.apache.org/jira/browse/YARN-9809) | *Major* | **NMs should supply a health status when registering with RM** + +Improved node registration with node health status. + + +--- + +* [HADOOP-17125](https://issues.apache.org/jira/browse/HADOOP-17125) | *Major* | **Using snappy-java in SnappyCodec** + +The SnappyCodec uses the snappy-java compression library, rather than explicitly referencing native binaries. It contains the native libraries for many operating systems and instruction sets, falling back to a pure Java implementation. It does require that snappy-java.jar is on the classpath. It can be found in hadoop-common/lib, and has already been present as part of the Avro dependencies. + + +--- + +* [HDFS-15253](https://issues.apache.org/jira/browse/HDFS-15253) | *Major* | **Set default throttle value on dfs.image.transfer.bandwidthPerSec** + +The configuration dfs.image.transfer.bandwidthPerSec, which defines the maximum bandwidth available for fsimage transfer, is changed from 0 (meaning no throttle at all) to 50MB/s. + + +--- + +* [HADOOP-17021](https://issues.apache.org/jira/browse/HADOOP-17021) | *Minor* | **Add concat fs command** + +"hadoop fs" has a concat command. It is available on all filesystems which support the concat API, including HDFS and WebHDFS. + + +--- + +* [HADOOP-17292](https://issues.apache.org/jira/browse/HADOOP-17292) | *Major* | **Using lz4-java in Lz4Codec** + +Hadoop's LZ4 compression codec now depends on lz4-java. Native LZ4 compression is performed through the encapsulated JNI bindings, and it is no longer necessary to install and configure the lz4 system package. + +lz4-java is declared in provided scope. Applications that wish to use the lz4 codec must declare a dependency on lz4-java explicitly. + + +--- + +* [HADOOP-17313](https://issues.apache.org/jira/browse/HADOOP-17313) | *Major* | **FileSystem.get to support slow-to-instantiate FS clients** + +The option "fs.creation.parallel.count" sets a semaphore to throttle the number of FileSystem instances which +can be created simultaneously. + +This is designed to reduce the impact of many threads in an application calling +FileSystem.get() on a filesystem which takes time to instantiate -for example +to an object store where HTTPS connections are set up during initialization. +Many threads trying to do this may create spurious delays by conflicting +for access to synchronized blocks; simply limiting the parallelism +diminishes the contention and so speeds up all threads trying to access +the store. + +The default value, 64, is larger than is likely to deliver any speedup -but +it does mean that there should be no adverse effects from the change. + +If a service appears to be blocking on all threads initializing connections to +abfs, s3a or another store, try a smaller (possibly significantly smaller) value.
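As a quick illustration of the fs.creation.parallel.count note above, here is a minimal, hedged sketch of how a client application might lower the limit before calling FileSystem.get(). Only the property name and its default of 64 come from the release note; the value 8, the bucket name and the class name are arbitrary examples, not recommendations.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class CreationParallelismSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Throttle concurrent FileSystem instantiation; the release note states
    // the default is 64. The value 8 here is only an illustrative choice.
    conf.setInt("fs.creation.parallel.count", 8);
    // "example-bucket" is a hypothetical store used purely for illustration.
    FileSystem fs = FileSystem.get(new URI("s3a://example-bucket/"), conf);
    System.out.println("Obtained filesystem: " + fs.getUri());
  }
}
```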
+ + +--- + +* [HADOOP-17338](https://issues.apache.org/jira/browse/HADOOP-17338) | *Major* | **Intermittent S3AInputStream failures: Premature end of Content-Length delimited message body etc** + +**WARNING: No release note provided for this change.** + + +--- + +* [HDFS-15380](https://issues.apache.org/jira/browse/HDFS-15380) | *Major* | **RBF: Could not fetch real remote IP in RouterWebHdfsMethods** + +**WARNING: No release note provided for this change.** + + +--- + +* [HADOOP-17422](https://issues.apache.org/jira/browse/HADOOP-17422) | *Major* | **ABFS: Set default ListMaxResults to max server limit** + +ABFS: The default value for "fs.azure.list.max.results" was changed from 500 to 5000. + + +--- + +* [HDFS-15719](https://issues.apache.org/jira/browse/HDFS-15719) | *Critical* | **[Hadoop 3] Both NameNodes can crash simultaneously due to the short JN socket timeout** + +The default value of the configuration hadoop.http.idle\_timeout.ms (how long Jetty waits before disconnecting an idle connection) is changed from 10000 to 60000. +This property is inlined at compile time, so an application that references this property must be recompiled in order for it to take effect. + + +--- + +* [HADOOP-17454](https://issues.apache.org/jira/browse/HADOOP-17454) | *Major* | **[s3a] Disable bucket existence check - set fs.s3a.bucket.probe to 0** + +S3A bucket existence check is disabled (fs.s3a.bucket.probe is 0), so there will be no existence check on the bucket during the S3AFileSystem initialization. The first operation which attempts to interact with the bucket will fail if the bucket does not exist. + + +--- + +* [HADOOP-17337](https://issues.apache.org/jira/browse/HADOOP-17337) | *Blocker* | **S3A NetworkBinding has a runtime class dependency on a third-party shaded class** + +The s3a filesystem will link against the unshaded AWS S3 SDK. Making an application's dependencies consistent with that SDK is left as an exercise. Note: native openssl is not supported as a socket factory in unshaded deployments. + + +--- + +* [HADOOP-16748](https://issues.apache.org/jira/browse/HADOOP-16748) | *Major* | **Migrate to Python 3 and upgrade Yetus to 0.13.0** + + +- Upgraded Yetus to 0.13.0. +- Removed determine-flaky-tests-hadoop.py. +- Temporarily disabled shelldocs check in the Jenkins jobs due to YETUS-1099. + + +--- + +* [HADOOP-16721](https://issues.apache.org/jira/browse/HADOOP-16721) | *Blocker* | **Improve S3A rename resilience** + +The S3A connector's rename() operation now raises FileNotFoundException if the source doesn't exist; FileAlreadyExistsException if the destination is unsuitable. It no longer checks for a parent directory existing -instead it simply verifies that there is no file immediately above the destination path. See the example sketch after these notes. + + +--- + +* [HADOOP-17531](https://issues.apache.org/jira/browse/HADOOP-17531) | *Critical* | **DistCp: Reduce memory usage on copying huge directories** + +Added a -useiterator option in distcp which uses listStatusIterator for building the listing, primarily to reduce memory usage at the client. + + +--- + +* [HADOOP-16870](https://issues.apache.org/jira/browse/HADOOP-16870) | *Major* | **Use spotbugs-maven-plugin instead of findbugs-maven-plugin** + +Removed findbugs from the hadoop build images and added spotbugs instead. +Upgraded SpotBugs to 4.2.2 and spotbugs-maven-plugin to 4.2.0.
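Referring back to the HADOOP-16721 note above, this is a minimal sketch of how a caller might handle the exceptions that the S3A connector's rename() can now raise. The paths, class name and messages are hypothetical; only the exception types are taken from the release note.

```java
import java.io.FileNotFoundException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameResilienceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical S3A paths, used only for illustration.
    Path src = new Path("s3a://example-bucket/tmp/part-00000");
    Path dst = new Path("s3a://example-bucket/data/part-00000");
    FileSystem fs = src.getFileSystem(conf);
    try {
      if (!fs.rename(src, dst)) {
        System.err.println("rename() returned false for " + src);
      }
    } catch (FileNotFoundException e) {
      // Raised when the source path does not exist.
      System.err.println("source missing: " + e.getMessage());
    } catch (FileAlreadyExistsException e) {
      // Raised when the destination is unsuitable, e.g. an existing file.
      System.err.println("destination unusable: " + e.getMessage());
    }
  }
}
```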
+ + +--- + +* [HADOOP-17222](https://issues.apache.org/jira/browse/HADOOP-17222) | *Major* | **Create socket address leveraging URI cache** + +The DFS client can use the newly added URI cache when creating socket addresses for read operations. By default it is disabled. When enabled, creating a socket address will use a cached URI object based on host:port to reduce the frequency of URI object creation. + +To enable it, set the following config key to true: +\<property\> +  \<name\>dfs.client.read.uri.cache.enabled\</name\> +  \<value\>true\</value\> +\</property\> + + +--- + +* [HADOOP-16524](https://issues.apache.org/jira/browse/HADOOP-16524) | *Major* | **Automatic keystore reloading for HttpServer2** + +Adds auto-reload of the keystore. + +Adds the new config below (default 10 seconds): + + ssl.{0}.stores.reload.interval + +The refresh interval used to check if either the truststore or keystore certificate file has changed. + + +--- + +* [HDFS-15942](https://issues.apache.org/jira/browse/HDFS-15942) | *Major* | **Increase Quota initialization threads** + +The default quota initialization thread count during the NameNode startup process (dfs.namenode.quota.init-threads) is increased from 4 to 12. + + +--- + +* [HDFS-15975](https://issues.apache.org/jira/browse/HDFS-15975) | *Minor* | **Use LongAdder instead of AtomicLong** + +This JIRA changes public fields in DFSHedgedReadMetrics. If you are using the public member variables of DFSHedgedReadMetrics, you need to use them through the public API. + + +--- + +* [HADOOP-17597](https://issues.apache.org/jira/browse/HADOOP-17597) | *Minor* | **Add option to downgrade S3A rejection of Syncable to warning** + +The S3A output streams now raise UnsupportedOperationException on calls to Syncable.hsync() or Syncable.hflush(). This is to make absolutely clear to programs trying to use the syncable API that the stream doesn't save any data at all until close. Programs which use this to flush their write-ahead logs will fail immediately, rather than appear to succeed but without saving any data. + +To downgrade the API calls to simply printing a warning, set "fs.s3a.downgrade.syncable.exceptions" to true. This will not change the other behaviour: no data is saved. + +Object stores are not filesystems. + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.3.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.3.1.xml new file mode 100644 index 0000000000000..d4444cf5cb065 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.3.1.xml @@ -0,0 +1,835 @@ [Generated jdiff API descriptor (835 added lines); the XML markup was lost in extraction, so only the embedded javadoc text below survives.] A distributed implementation of {@link org.apache.hadoop.fs.FileSystem}. This is loosely modelled after Google's GFS.

    The most important difference is that unlike GFS, Hadoop DFS files have strictly one writer at any one time. Bytes are always appended to the end of the writer's stream. There is no notion of "record appends" or "mutations" that are then checked or reordered. Writers simply emit a byte stream. That byte stream is guaranteed to be stored in the order written.

    This method must return as quickly as possible, since it's called in a critical section of the NameNode's operation.

    @param succeeded Whether authorization succeeded.
    @param userName Name of the user executing the request.
    @param addr Remote address of the request.
    @param cmd The requested command.
    @param src Path of affected source file.
    @param dst Path of affected destination file (if any).
    @param stat File information for operations that change the file's metadata (permissions, owner, times, etc).
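The logAuditEvent javadoc preserved above describes the audit callback contract: it runs inside a critical section of the NameNode, so implementations must return quickly. Below is a minimal sketch of a custom logger, assuming the org.apache.hadoop.hdfs.server.namenode.AuditLogger interface matches the parameter list shown; the class name and the logging behaviour are illustrative only.

```java
import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

public class MinimalAuditLogger implements AuditLogger {
  @Override
  public void initialize(Configuration conf) {
    // No configuration needed for this sketch.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst, FileStatus stat) {
    // Keep this path cheap: a production logger would hand the event off to
    // an asynchronous appender instead of doing any blocking work here.
    if (!succeeded) {
      System.err.println("DENIED " + cmd + " on " + src + " by " + userName
          + " from " + addr);
    }
  }
}
```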
    diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 10a0526519de2..024cf055207c5 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -134,7 +134,7 @@ false - 3.2.2 + 3.3.1 -unstable From d42196ed97947864f1469c132cd926e1f3780238 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Tue, 15 Jun 2021 18:58:42 +0000 Subject: [PATCH 418/666] YARN-10767. Yarn Logs Command retrying on Standby RM for 30 times. Contributed by D M Murali Krishna Reddy. --- .../apache/hadoop/yarn/util/RMHAUtils.java | 3 ++- .../hadoop/yarn/webapp/util/WebAppUtils.java | 26 +++++++------------ 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java index 2e996e90ee8bd..1c379409c22bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java @@ -23,6 +23,7 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -35,7 +36,7 @@ @Unstable public class RMHAUtils { - public static String findActiveRMHAId(YarnConfiguration conf) { + public static String findActiveRMHAId(Configuration conf) { YarnConfiguration yarnConf = new YarnConfiguration(conf); Collection rmIds = yarnConf.getStringCollection(YarnConfiguration.RM_HA_IDS); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java index 09daf424d154f..5b578197100a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.PATH_JOINER; import java.io.IOException; +import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -97,24 +98,17 @@ public static void setNMWebAppHostNameAndPort(Configuration conf, */ public static R execOnActiveRM(Configuration conf, ThrowingBiFunction func, T arg) throws Exception { - String rm1Address = getRMWebAppURLWithScheme(conf, 0); - try { - return func.apply(rm1Address, arg); - } catch (Exception e) { - if (HAUtil.isHAEnabled(conf)) { - int rms = HAUtil.getRMHAIds(conf).size(); - for (int i=1; i(HAUtil.getRMHAIds(conf)).indexOf(activeRMId); + } else { + throw new ConnectException("No Active RM available"); } - throw e; } + String rm1Address = getRMWebAppURLWithScheme(conf, haIndex); + return func.apply(rm1Address, arg); } /** A BiFunction which throws on Exception. */ From 87661936d0d2ee6ce39878680240c59d074ef9e6 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 16 Jun 2021 15:53:12 +0530 Subject: [PATCH 419/666] HDFS-16073. 
Remove redundant RPC requests for getFileLinkInfo in ClientNamenodeProtocolTranslatorPB. Contributed by lei w. --- .../hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 7f7bdc8ef83d9..2668ec1cefe98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -996,9 +996,7 @@ public HdfsFileStatus getFileLinkInfo(String src) throws IOException { .setSrc(src).build(); try { GetFileLinkInfoResponseProto result = rpcProxy.getFileLinkInfo(null, req); - return result.hasFs() ? - PBHelperClient.convert(rpcProxy.getFileLinkInfo(null, req).getFs()) : - null; + return result.hasFs() ? PBHelperClient.convert(result.getFs()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } From dbdd327316f1d71eec6614356e1a812c047ce6ce Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 16 Jun 2021 18:26:58 +0200 Subject: [PATCH 420/666] YARN-10801. Fix Auto Queue template to properly set all configuration properties. Contributed by Andras Gyori --- .../scheduler/capacity/AbstractCSQueue.java | 18 ++++++-- .../scheduler/capacity/LeafQueue.java | 22 +++++++++- .../scheduler/capacity/ParentQueue.java | 41 ++++++------------- ...CapacitySchedulerNewQueueAutoCreation.java | 10 +++++ 4 files changed, 58 insertions(+), 33 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index be9a0e36391c5..94ade5e3fabce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -360,9 +360,8 @@ protected void setupQueueConfigs(Resource clusterResource, writeLock.lock(); try { - if (isDynamicQueue() && getParent() instanceof ParentQueue) { - ((ParentQueue) getParent()).getAutoCreatedQueueTemplate() - .setTemplateEntriesForChild(configuration, getQueuePath()); + if (isDynamicQueue()) { + setDynamicQueueProperties(configuration); } // get labels this.accessibleLabels = @@ -478,6 +477,19 @@ protected void setupQueueConfigs(Resource clusterResource, } } + /** + * Set properties specific to dynamic queues. 
+ * @param configuration configuration on which the properties are set + */ + protected void setDynamicQueueProperties( + CapacitySchedulerConfiguration configuration) { + // Set properties from parent template + if (getParent() instanceof ParentQueue) { + ((ParentQueue) getParent()).getAutoCreatedQueueTemplate() + .setTemplateEntriesForChild(configuration, getQueuePath()); + } + } + private void setupMaximumAllocation(CapacitySchedulerConfiguration csConf) { String myQueuePath = getQueuePath(); Resource clusterMax = ResourceUtils diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index c76bf98f40c45..19de6db21b721 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -147,14 +147,21 @@ public class LeafQueue extends AbstractCSQueue { @SuppressWarnings({ "unchecked", "rawtypes" }) public LeafQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { - this(cs, cs.getConfiguration(), queueName, parent, old); + this(cs, cs.getConfiguration(), queueName, parent, old, false); } public LeafQueue(CapacitySchedulerContext cs, CapacitySchedulerConfiguration configuration, - String queueName, CSQueue parent, CSQueue old) throws + String queueName, CSQueue parent, CSQueue old) throws IOException { + this(cs, configuration, queueName, parent, old, false); + } + + public LeafQueue(CapacitySchedulerContext cs, + CapacitySchedulerConfiguration configuration, + String queueName, CSQueue parent, CSQueue old, boolean isDynamic) throws IOException { super(cs, configuration, queueName, parent, old); + setDynamicQueue(isDynamic); this.scheduler = cs; this.usersManager = new UsersManager(metrics, this, labelManager, scheduler, @@ -1691,6 +1698,17 @@ protected boolean canAssignToUser(Resource clusterResource, } } + @Override + protected void setDynamicQueueProperties( + CapacitySchedulerConfiguration configuration) { + super.setDynamicQueueProperties(configuration); + // set to -1, to disable it + configuration.setUserLimitFactor(getQueuePath(), -1); + // Set Max AM percentage to a higher value + configuration.setMaximumApplicationMasterResourcePerQueuePercent( + getQueuePath(), 1f); + } + private void updateSchedulerHealthForCompletedContainer( RMContainer rmContainer, ContainerStatus containerStatus) { // Update SchedulerHealth for released / preempted container diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 6f850187bfd62..ee6015505a756 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -112,14 +112,21 @@ public class ParentQueue extends AbstractCSQueue { public ParentQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { - this(cs, cs.getConfiguration(), queueName, parent, old); + this(cs, cs.getConfiguration(), queueName, parent, old, false); + } + private ParentQueue(CapacitySchedulerContext cs, + CapacitySchedulerConfiguration csConf, String queueName, + CSQueue parent, + CSQueue old) throws IOException { + this(cs, csConf, queueName, parent, old, false); } private ParentQueue(CapacitySchedulerContext cs, CapacitySchedulerConfiguration csConf, String queueName, CSQueue parent, - CSQueue old) + CSQueue old, boolean isDynamic) throws IOException { super(cs, queueName, parent, old); + setDynamicQueue(isDynamic); this.scheduler = cs; this.rootQueue = (parent == null); @@ -476,26 +483,6 @@ public String toString() { "numContainers=" + getNumContainers(); } - private CapacitySchedulerConfiguration getConfForAutoCreatedQueue( - String childQueuePath, boolean isLeaf) { - // Copy existing config - CapacitySchedulerConfiguration dupCSConfig = - new CapacitySchedulerConfiguration( - csContext.getConfiguration(), false); - autoCreatedQueueTemplate.setTemplateEntriesForChild(dupCSConfig, - childQueuePath); - if (isLeaf) { - // set to -1, to disable it - dupCSConfig.setUserLimitFactor(childQueuePath, -1); - - // Set Max AM percentage to a higher value - dupCSConfig.setMaximumApplicationMasterResourcePerQueuePercent( - childQueuePath, 0.5f); - } - - return dupCSConfig; - } - private CSQueue createNewQueue(String childQueuePath, boolean isLeaf) throws SchedulerDynamicEditException { try { @@ -504,13 +491,11 @@ private CSQueue createNewQueue(String childQueuePath, boolean isLeaf) childQueuePath.lastIndexOf(".") + 1); if (isLeaf) { - childQueue = new LeafQueue(csContext, - getConfForAutoCreatedQueue(childQueuePath, isLeaf), queueShortName, - this, null); + childQueue = new LeafQueue(csContext, csContext.getConfiguration(), + queueShortName, this, null, true); } else{ - childQueue = new ParentQueue(csContext, - getConfForAutoCreatedQueue(childQueuePath, isLeaf), queueShortName, - this, null); + childQueue = new ParentQueue(csContext, csContext.getConfiguration(), + queueShortName, this, null, true); } childQueue.setDynamicQueue(true); // It should be sufficient now, we don't need to set more, because weights diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index b96c1e4896892..7d0d537b49ee7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -147,6 +147,8 @@ private void createBasicQueueStructureAndValidate() throws Exception { Assert.assertEquals(1f, c.getQueueCapacities().getWeight(), 1e-6); Assert.assertEquals(400 * GB, c.getQueueResourceQuotas().getEffectiveMinResource().getMemorySize()); + Assert.assertEquals(((LeafQueue)c).getUserLimitFactor(), -1, 1e-6); + Assert.assertEquals(((LeafQueue)c).getMaxAMResourcePerQueuePercent(), 1, 1e-6); // Now add another queue-d, in the same hierarchy createQueue("root.d-auto"); @@ -696,6 +698,14 @@ public void testAutoCreatedQueueTemplateConfig() throws Exception { cs.reinitialize(csConf, mockRM.getRMContext()); Assert.assertEquals("weight is not explicitly set", 4f, a2.getQueueCapacities().getWeight(), 1e-6); + + csConf.setBoolean(AutoCreatedQueueTemplate.getAutoQueueTemplatePrefix( + "root.a") + CapacitySchedulerConfiguration + .AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE, false); + cs.reinitialize(csConf, mockRM.getRMContext()); + LeafQueue a3 = createQueue("root.a.a3"); + Assert.assertFalse("auto queue deletion should be turned off on a3", + a3.isEligibleForAutoDeletion()); } @Test From 047144dffe2b59d8e35a398c35d1f413ab89e9e0 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 16 Jun 2021 18:55:09 +0200 Subject: [PATCH 421/666] YARN-10813. Set default capacity of root for node labels. Contributed by Andras Gyori --- .../scheduler/capacity/CapacitySchedulerConfiguration.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 58a80028c6056..622fef61ffc54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -800,10 +800,11 @@ private float internalGetLabeledQueueCapacity(String queue, String label, // root.From AbstractCSQueue, absolute resource, and weight will be parsed // and updated separately. Once nodes are added/removed in cluster, // capacity is percentage will also be re-calculated. - return defaultValue; + return queue.equals("root") ? 100.0f : defaultValue; } - float capacity = getFloat(capacityPropertyName, defaultValue); + float capacity = queue.equals("root") ? 100.0f + : getFloat(capacityPropertyName, defaultValue); if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) { throw new IllegalArgumentException( From 36a4699e76129f66b35f2820eb6260409b021128 Mon Sep 17 00:00:00 2001 From: Takanobu Asanuma Date: Thu, 17 Jun 2021 09:58:47 +0900 Subject: [PATCH 422/666] HADOOP-17760. 
Delete hadoop.ssl.enabled and dfs.https.enable from docs and core-default.xml (#3099) Reviewed-by: Ayush Saxena --- .../hadoop-common/src/main/resources/core-default.xml | 8 -------- .../hadoop-common/src/site/markdown/SecureMode.md | 3 +-- .../apache/hadoop/conf/TestCommonConfigurationFields.java | 2 -- .../hadoop-sls/src/main/data/2jobs2min-rumen-jh.json | 2 -- 4 files changed, 1 insertion(+), 14 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 4794bb2764c80..896874f32db71 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -3096,14 +3096,6 @@ - - hadoop.ssl.enabled - false - - Deprecated. Use dfs.http.policy and yarn.http.policy instead. - - - hadoop.ssl.enabled.protocols TLSv1.2 diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md index 523fa40dbc14f..ebfc16c1a5287 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md @@ -269,9 +269,8 @@ The following settings allow configuring SSL access to the NameNode web UI (opti | Parameter | Value | Notes | |:-----------------------------|:------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `dfs.http.policy` | `HTTP_ONLY` or `HTTPS_ONLY` or `HTTP_AND_HTTPS` | `HTTPS_ONLY` turns off http access. This option takes precedence over the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. If using SASL to authenticate data transfer protocol instead of running DataNode as root and using privileged ports, then this property must be set to `HTTPS_ONLY` to guarantee authentication of HTTP servers. (See `dfs.data.transfer.protection`.) | +| `dfs.http.policy` | `HTTP_ONLY` or `HTTPS_ONLY` or `HTTP_AND_HTTPS` | `HTTPS_ONLY` turns off http access. If using SASL to authenticate data transfer protocol instead of running DataNode as root and using privileged ports, then this property must be set to `HTTPS_ONLY` to guarantee authentication of HTTP servers. (See `dfs.data.transfer.protection`.) | | `dfs.namenode.https-address` | `0.0.0.0:9871` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. | -| `dfs.https.enable` | `true` | This value is deprecated. 
`Use dfs.http.policy` | ### Secondary NameNode diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 30ed4a4193783..8fd1df82b2ff8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -238,8 +238,6 @@ public void initializeMemberVariables() { // - org.apache.hadoop.net.NetUtils xmlPropsToSkipCompare .add("hadoop.rpc.socket.factory.class.ClientProtocol"); - // - Where is this used? - xmlPropsToSkipCompare.add("hadoop.ssl.enabled"); // Keys with no corresponding variable // - org.apache.hadoop.io.compress.bzip2.Bzip2Factory diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json index 8c2e43e1e53ba..8e9af7549cd33 100644 --- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json +++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json @@ -4559,7 +4559,6 @@ "hadoop.hdfs.configuration.version" : "1", "dfs.datanode.balance.bandwidthPerSec" : "1048576", "mapreduce.reduce.shuffle.connect.timeout" : "180000", - "hadoop.ssl.enabled" : "false", "dfs.journalnode.rpc-address" : "0.0.0.0:8485", "yarn.nodemanager.aux-services" : "mapreduce.shuffle", "mapreduce.job.counters.max" : "120", @@ -9626,7 +9625,6 @@ "hadoop.hdfs.configuration.version" : "1", "dfs.datanode.balance.bandwidthPerSec" : "1048576", "mapreduce.reduce.shuffle.connect.timeout" : "180000", - "hadoop.ssl.enabled" : "false", "dfs.journalnode.rpc-address" : "0.0.0.0:8485", "yarn.nodemanager.aux-services" : "mapreduce.shuffle", "mapreduce.job.counters.max" : "120", From 4cd5a69ac559727ae1b3575213bbba5e5d0d3718 Mon Sep 17 00:00:00 2001 From: hust_hhb <284824253@qq.com> Date: Thu, 17 Jun 2021 11:08:29 +0800 Subject: [PATCH 423/666] HDFS-13671. 
Namenode deletes large dir slowly caused by FoldedTreeSet#removeAndGet (#3065) --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 12 - .../hadoop/hdfs/protocolPB/PBHelper.java | 5 +- .../server/blockmanagement/BlockInfo.java | 189 ++- .../blockmanagement/BlockInfoContiguous.java | 29 +- .../blockmanagement/BlockInfoStriped.java | 33 +- .../server/blockmanagement/BlockManager.java | 453 ++---- .../server/blockmanagement/BlocksMap.java | 66 +- .../blockmanagement/DatanodeStorageInfo.java | 127 +- .../hdfs/server/datanode/BPServiceActor.java | 4 +- .../server/datanode/DirectoryScanner.java | 4 +- .../datanode/fsdataset/FsDatasetSpi.java | 7 +- .../fsdataset/impl/FsDatasetImpl.java | 7 +- .../datanode/fsdataset/impl/ReplicaMap.java | 81 +- .../hdfs/server/namenode/FSNamesystem.java | 7 +- .../server/protocol/BlockReportContext.java | 10 +- .../server/protocol/DatanodeProtocol.java | 1 + .../hadoop/hdfs/util/FoldedTreeSet.java | 1285 ----------------- .../src/main/proto/DatanodeProtocol.proto | 3 +- .../src/main/resources/hdfs-default.xml | 26 - .../apache/hadoop/hdfs/TestCrcCorruption.java | 2 +- .../hdfs/TestReconstructStripedFile.java | 2 +- .../hdfs/protocol/TestBlockListAsLongs.java | 4 +- .../server/blockmanagement/TestBlockInfo.java | 88 ++ .../blockmanagement/TestBlockManager.java | 70 +- .../blockmanagement/TestBlockReportLease.java | 5 +- ...nstructStripedBlocksWithRackAwareness.java | 9 +- .../server/datanode/SimulatedFSDataset.java | 2 +- .../TestBlockHasMultipleReplicasOnSameDN.java | 7 +- .../server/datanode/TestLargeBlockReport.java | 5 +- .../TestNNHandlesBlockReportPerStorage.java | 2 +- .../TestNNHandlesCombinedBlockReport.java | 2 +- .../extdataset/ExternalDatasetImpl.java | 2 +- .../fsdataset/impl/TestFsDatasetImpl.java | 38 +- .../namenode/NNThroughputBenchmark.java | 4 +- .../server/namenode/TestAddStripedBlocks.java | 4 +- .../server/namenode/TestDeadDatanode.java | 2 +- .../hadoop/hdfs/util/FoldedTreeSetTest.java | 644 --------- 37 files changed, 621 insertions(+), 2620 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/FoldedTreeSet.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/FoldedTreeSetTest.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 77355be0c2664..bc371ea08ac9e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -291,18 +291,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2; public static final String DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit"; public static final int DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4; - public static final String DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_INTERVAL_MS_KEY - = "dfs.namenode.storageinfo.defragment.interval.ms"; - public static final int - DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_INTERVAL_MS_DEFAULT = 10 * 60 * 1000; - public static final String DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_TIMEOUT_MS_KEY - = "dfs.namenode.storageinfo.defragment.timeout.ms"; - public static final int - DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_TIMEOUT_MS_DEFAULT = 4; - public static final String 
DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_KEY - = "dfs.namenode.storageinfo.defragment.ratio"; - public static final double - DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_DEFAULT = 0.75; public static final String DFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_KEY = "dfs.namenode.blockreport.queue.size"; public static final int DFS_NAMENODE_BLOCKREPORT_QUEUE_SIZE_DEFAULT diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index dff5fa574091c..690ad0c279019 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -967,8 +967,8 @@ public static JournalInfoProto convert(JournalInfo j) { public static BlockReportContext convert(BlockReportContextProto proto) { - return new BlockReportContext(proto.getTotalRpcs(), proto.getCurRpc(), - proto.getId(), proto.getLeaseId(), proto.getSorted()); + return new BlockReportContext(proto.getTotalRpcs(), + proto.getCurRpc(), proto.getId(), proto.getLeaseId()); } public static BlockReportContextProto convert(BlockReportContext context) { @@ -977,7 +977,6 @@ public static BlockReportContextProto convert(BlockReportContext context) { setCurRpc(context.getCurRpc()). setId(context.getReportId()). setLeaseId(context.getLeaseId()). - setSorted(context.isSorted()). build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index c6a7bb52b7045..b8047a8d08015 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -19,8 +19,8 @@ import java.io.IOException; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; -import java.util.NoSuchElementException; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; @@ -57,9 +57,19 @@ public abstract class BlockInfo extends Block /** For implementing {@link LightWeightGSet.LinkedElement} interface. */ private LightWeightGSet.LinkedElement nextLinkedElement; - - // Storages this block is replicated on - protected DatanodeStorageInfo[] storages; + /** + * This array contains triplets of references. For each i-th storage, the + * block belongs to triplets[3*i] is the reference to the + * {@link DatanodeStorageInfo} and triplets[3*i+1] and triplets[3*i+2] are + * references to the previous and the next blocks, respectively, in the list + * of blocks belonging to this storage. + * + * Using previous and next in Object triplets is done instead of a + * {@link LinkedList} list to efficiently use memory. With LinkedList the cost + * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16 + * bytes using the triplets. + */ + protected Object[] triplets; private BlockUnderConstructionFeature uc; @@ -69,14 +79,14 @@ public abstract class BlockInfo extends Block * in the block group */ public BlockInfo(short size) { - this.storages = new DatanodeStorageInfo[size]; + this.triplets = new Object[3 * size]; this.bcId = INVALID_INODE_ID; this.replication = isStriped() ? 
0 : size; } public BlockInfo(Block blk, short size) { super(blk); - this.storages = new DatanodeStorageInfo[size]; + this.triplets = new Object[3*size]; this.bcId = INVALID_INODE_ID; this.replication = isStriped() ? 0 : size; } @@ -106,31 +116,7 @@ public boolean isDeleted() { } public Iterator getStorageInfos() { - return new Iterator() { - - private int index = 0; - - @Override - public boolean hasNext() { - while (index < storages.length && storages[index] == null) { - index++; - } - return index < storages.length; - } - - @Override - public DatanodeStorageInfo next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - return storages[index++]; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("Sorry. can't remove."); - } - }; + return new BlocksMap.StorageIterator(this); } public DatanodeDescriptor getDatanode(int index) { @@ -139,18 +125,73 @@ public DatanodeDescriptor getDatanode(int index) { } DatanodeStorageInfo getStorageInfo(int index) { - assert this.storages != null : "BlockInfo is not initialized"; - return storages[index]; + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; + return (DatanodeStorageInfo)triplets[index*3]; + } + + BlockInfo getPrevious(int index) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound"; + BlockInfo info = (BlockInfo)triplets[index*3+1]; + assert info == null || + info.getClass().getName().startsWith(BlockInfo.class.getName()) : + "BlockInfo is expected at " + index*3; + return info; + } + + BlockInfo getNext(int index) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound"; + BlockInfo info = (BlockInfo)triplets[index*3+2]; + assert info == null || info.getClass().getName().startsWith( + BlockInfo.class.getName()) : + "BlockInfo is expected at " + index*3; + return info; } void setStorageInfo(int index, DatanodeStorageInfo storage) { - assert this.storages != null : "BlockInfo is not initialized"; - this.storages[index] = storage; + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; + triplets[index*3] = storage; + } + + /** + * Return the previous block on the block list for the datanode at + * position index. Set the previous block on the list to "to". + * + * @param index - the datanode index + * @param to - block to be set to previous on the list of blocks + * @return current previous block on the list of blocks + */ + BlockInfo setPrevious(int index, BlockInfo to) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound"; + BlockInfo info = (BlockInfo) triplets[index*3+1]; + triplets[index*3+1] = to; + return info; + } + + /** + * Return the next block on the block list for the datanode at + * position index. Set the next block on the list to "to". 
+ * + * @param index - the datanode index + * @param to - block to be set to next on the list of blocks + * @return current next block on the list of blocks + */ + BlockInfo setNext(int index, BlockInfo to) { + assert this.triplets != null : "BlockInfo is not initialized"; + assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound"; + BlockInfo info = (BlockInfo) triplets[index*3+2]; + triplets[index*3+2] = to; + return info; } public int getCapacity() { - assert this.storages != null : "BlockInfo is not initialized"; - return storages.length; + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; + return triplets.length / 3; } /** @@ -227,6 +268,80 @@ int findStorageInfo(DatanodeStorageInfo storageInfo) { return -1; } + /** + * Insert this block into the head of the list of blocks + * related to the specified DatanodeStorageInfo. + * If the head is null then form a new list. + * @return current block as the new head of the list. + */ + BlockInfo listInsert(BlockInfo head, DatanodeStorageInfo storage) { + int dnIndex = this.findStorageInfo(storage); + assert dnIndex >= 0 : "Data node is not found: current"; + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is already in the list and cannot be inserted."; + this.setPrevious(dnIndex, null); + this.setNext(dnIndex, head); + if (head != null) { + head.setPrevious(head.findStorageInfo(storage), this); + } + return this; + } + + /** + * Remove this block from the list of blocks + * related to the specified DatanodeStorageInfo. + * If this block is the head of the list then return the next block as + * the new head. + * @return the new head of the list or null if the list becomes + * empy after deletion. + */ + BlockInfo listRemove(BlockInfo head, DatanodeStorageInfo storage) { + if (head == null) { + return null; + } + int dnIndex = this.findStorageInfo(storage); + if (dnIndex < 0) { // this block is not on the data-node list + return head; + } + + BlockInfo next = this.getNext(dnIndex); + BlockInfo prev = this.getPrevious(dnIndex); + this.setNext(dnIndex, null); + this.setPrevious(dnIndex, null); + if (prev != null) { + prev.setNext(prev.findStorageInfo(storage), next); + } + if (next != null) { + next.setPrevious(next.findStorageInfo(storage), prev); + } + if (this == head) { // removing the head + head = next; + } + return head; + } + + /** + * Remove this block from the list of blocks related to the specified + * DatanodeDescriptor. Insert it into the head of the list of blocks. + * + * @return the new head of the list. 
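The listInsert/listRemove pair above maintains an intrusive doubly-linked list per storage, with the previous/next links kept inside the triplets slots so that each DatanodeStorageInfo only has to remember the head. The following is a rough sketch of the same head-insert and unlink pattern on a plain node type; Node and IntrusiveListSketch are hypothetical, not Hadoop classes.

// Illustrative sketch of the head-insert / unlink pattern used by
// listInsert and listRemove; Node is a hypothetical stand-in type.
public class IntrusiveListSketch {
  static class Node {
    final String name;
    Node prev, next;
    Node(String name) { this.name = name; }
  }

  // Insert node at the head of the list and return the new head.
  static Node listInsert(Node head, Node node) {
    node.prev = null;
    node.next = head;
    if (head != null) {
      head.prev = node;
    }
    return node;
  }

  // Unlink node from the list and return the (possibly new) head.
  static Node listRemove(Node head, Node node) {
    Node prev = node.prev;
    Node next = node.next;
    node.prev = null;
    node.next = null;
    if (prev != null) {
      prev.next = next;
    }
    if (next != null) {
      next.prev = prev;
    }
    return node == head ? next : head;
  }

  public static void main(String[] args) {
    Node head = null;
    Node a = new Node("a"), b = new Node("b");
    head = listInsert(head, a);   // list: a
    head = listInsert(head, b);   // list: b -> a
    head = listRemove(head, b);   // list: a
    System.out.println("head = " + head.name);  // prints "a"
  }
}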
+ */ + public BlockInfo moveBlockToHead(BlockInfo head, DatanodeStorageInfo storage, + int curIndex, int headIndex) { + if (head == this) { + return this; + } + BlockInfo next = this.setNext(curIndex, head); + BlockInfo prev = this.setPrevious(curIndex, null); + + head.setPrevious(headIndex, this); + prev.setNext(prev.findStorageInfo(storage), next); + if (next != null) { + next.setPrevious(next.findStorageInfo(storage), prev); + } + return this; + } + @Override public int hashCode() { // Super implementation is sufficient diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java index f830678b429b3..d68b72d6c6277 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java @@ -38,20 +38,20 @@ public BlockInfoContiguous(Block blk, short size) { } /** - * Ensure that there is enough space to include num more storages. - * @return first free storage index. + * Ensure that there is enough space to include num more triplets. + * @return first free triplet index. */ private int ensureCapacity(int num) { - assert this.storages != null : "BlockInfo is not initialized"; + assert this.triplets != null : "BlockInfo is not initialized"; int last = numNodes(); - if (storages.length >= (last+num)) { + if (triplets.length >= (last+num)*3) { return last; } /* Not enough space left. Create a new array. Should normally * happen only when replication is manually increased by the user. */ - DatanodeStorageInfo[] old = storages; - storages = new DatanodeStorageInfo[(last+num)]; - System.arraycopy(old, 0, storages, 0, last); + Object[] old = triplets; + triplets = new Object[(last+num)*3]; + System.arraycopy(old, 0, triplets, 0, last * 3); return last; } @@ -63,6 +63,8 @@ boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) { // find the last null node int lastNode = ensureCapacity(1); setStorageInfo(lastNode, storage); + setNext(lastNode, null); + setPrevious(lastNode, null); return true; } @@ -72,12 +74,18 @@ boolean removeStorage(DatanodeStorageInfo storage) { if (dnIndex < 0) { // the node is not found return false; } + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is still in the list and must be removed first."; // find the last not null node int lastNode = numNodes()-1; - // replace current node entry by the lastNode one + // replace current node triplet by the lastNode one setStorageInfo(dnIndex, getStorageInfo(lastNode)); - // set the last entry to null + setNext(dnIndex, getNext(lastNode)); + setPrevious(dnIndex, getPrevious(lastNode)); + // set the last triplet to null setStorageInfo(lastNode, null); + setNext(lastNode, null); + setPrevious(lastNode, null); return true; } @@ -96,7 +104,8 @@ boolean isProvided() { @Override public int numNodes() { - assert this.storages != null : "BlockInfo is not initialized"; + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; for (int idx = getCapacity()-1; idx >= 0; idx--) { if (getDatanode(idx) != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index 5a133412a85d7..42d0471433414 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -32,20 +32,21 @@ /** * Subclass of {@link BlockInfo}, presenting a block group in erasure coding. * - * We still use a storage array to store DatanodeStorageInfo for each block in - * the block group. For a (m+k) block group, the first (m+k) storage units + * We still use triplets to store DatanodeStorageInfo for each block in the + * block group, as well as the previous/next block in the corresponding + * DatanodeStorageInfo. For a (m+k) block group, the first (m+k) triplet units * are sorted and strictly mapped to the corresponding block. * * Normally each block belonging to group is stored in only one DataNode. - * However, it is possible that some block is over-replicated. Thus the storage + * However, it is possible that some block is over-replicated. Thus the triplet * array's size can be larger than (m+k). Thus currently we use an extra byte - * array to record the block index for each entry. + * array to record the block index for each triplet. */ @InterfaceAudience.Private public class BlockInfoStriped extends BlockInfo { private final ErasureCodingPolicy ecPolicy; /** - * Always the same size with storage. Record the block index for each entry + * Always the same size with triplets. Record the block index for each triplet * TODO: actually this is only necessary for over-replicated block. Thus can * be further optimized to save memory usage. */ @@ -109,7 +110,7 @@ private int findSlot() { return i; } } - // need to expand the storage size + // need to expand the triplet size ensureCapacity(i + 1, true); return i; } @@ -141,6 +142,8 @@ boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) { private void addStorage(DatanodeStorageInfo storage, int index, int blockIndex) { setStorageInfo(index, storage); + setNext(index, null); + setPrevious(index, null); indices[index] = (byte) blockIndex; } @@ -183,22 +186,26 @@ boolean removeStorage(DatanodeStorageInfo storage) { if (dnIndex < 0) { // the node is not found return false; } - // set the entry to null + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is still in the list and must be removed first."; + // set the triplet to null setStorageInfo(dnIndex, null); + setNext(dnIndex, null); + setPrevious(dnIndex, null); indices[dnIndex] = -1; return true; } private void ensureCapacity(int totalSize, boolean keepOld) { if (getCapacity() < totalSize) { - DatanodeStorageInfo[] old = storages; + Object[] old = triplets; byte[] oldIndices = indices; - storages = new DatanodeStorageInfo[totalSize]; + triplets = new Object[totalSize * 3]; indices = new byte[totalSize]; initIndices(); if (keepOld) { - System.arraycopy(old, 0, storages, 0, old.length); + System.arraycopy(old, 0, triplets, 0, old.length); System.arraycopy(oldIndices, 0, indices, 0, oldIndices.length); } } @@ -225,7 +232,8 @@ public BlockType getBlockType() { @Override public int numNodes() { - assert this.storages != null : "BlockInfo is not initialized"; + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; int num = 0; for (int idx = getCapacity()-1; idx >= 0; idx--) { if 
(getStorageInfo(idx) != null) { @@ -304,7 +312,8 @@ public StorageAndBlockIndex next() { throw new NoSuchElementException(); } int i = index++; - return new StorageAndBlockIndex(storages[i], indices[i]); + return new StorageAndBlockIndex( + (DatanodeStorageInfo) triplets[i * 3], indices[i]); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index cc7b93fc4e4a6..ead915f1d38de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -35,6 +35,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; @@ -68,7 +69,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -110,7 +110,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; -import org.apache.hadoop.hdfs.util.FoldedTreeSet; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.server.namenode.CacheManager; @@ -125,7 +124,6 @@ import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -312,11 +310,6 @@ public long getTotalECBlockGroups() { private int replQueueResetToHeadThreshold; private int replQueueCallsSinceReset = 0; - /** How often to check and the limit for the storageinfo efficiency. */ - private final long storageInfoDefragmentInterval; - private final long storageInfoDefragmentTimeout; - private final double storageInfoDefragmentRatio; - /** * Mapping: Block {@literal ->} { BlockCollection, datanodes, self ref } * Updated only in response to client-sent information. @@ -331,10 +324,6 @@ public long getTotalECBlockGroups() { * {@link #redundancyThread} has run at least one full iteration. */ private final AtomicLong lastRedundancyCycleTS = new AtomicLong(-1); - /** StorageInfoDefragmenter thread. */ - private final Daemon storageInfoDefragmenterThread = - new Daemon(new StorageInfoDefragmenter()); - /** Block report thread for handling async reports. 
*/ private final BlockReportProcessingThread blockReportThread; @@ -548,19 +537,6 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled, DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT, TimeUnit.SECONDS, TimeUnit.MILLISECONDS); - this.storageInfoDefragmentInterval = - conf.getLong( - DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_INTERVAL_MS_KEY, - DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_INTERVAL_MS_DEFAULT); - this.storageInfoDefragmentTimeout = - conf.getLong( - DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_TIMEOUT_MS_KEY, - DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_TIMEOUT_MS_DEFAULT); - this.storageInfoDefragmentRatio = - conf.getDouble( - DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_KEY, - DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_DEFAULT); - this.encryptDataTransfer = conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT); @@ -749,8 +725,6 @@ public void activate(Configuration conf, long blockTotal) { datanodeManager.activate(conf); this.redundancyThread.setName("RedundancyMonitor"); this.redundancyThread.start(); - storageInfoDefragmenterThread.setName("StorageInfoMonitor"); - storageInfoDefragmenterThread.start(); this.blockReportThread.start(); mxBeanName = MBeans.register("NameNode", "BlockStats", this); bmSafeMode.activate(blockTotal); @@ -763,10 +737,8 @@ public void close() { bmSafeMode.close(); try { redundancyThread.interrupt(); - storageInfoDefragmenterThread.interrupt(); blockReportThread.interrupt(); redundancyThread.join(3000); - storageInfoDefragmenterThread.join(3000); blockReportThread.join(3000); } catch (InterruptedException ie) { } @@ -1715,18 +1687,9 @@ public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, /** Remove the blocks associated to the given datanode. */ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { providedStorageMap.removeDatanode(node); - for (DatanodeStorageInfo storage : node.getStorageInfos()) { - final Iterator it = storage.getBlockIterator(); - //add the BlockInfos to a new collection as the - //returned iterator is not modifiable. - Collection toRemove = new ArrayList<>(); - while (it.hasNext()) { - toRemove.add(it.next()); - } - - for (BlockInfo b : toRemove) { - removeStoredBlock(b, node); - } + final Iterator it = node.getBlockIterator(); + while(it.hasNext()) { + removeStoredBlock(it.next(), node); } // Remove all pending DN messages referencing this DN. pendingDNMessages.removeAllMessagesForDatanode(node); @@ -1740,11 +1703,8 @@ void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) { assert namesystem.hasWriteLock(); final Iterator it = storageInfo.getBlockIterator(); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); - Collection toRemove = new ArrayList<>(); - while (it.hasNext()) { - toRemove.add(it.next()); - } - for (BlockInfo block : toRemove) { + while(it.hasNext()) { + BlockInfo block = it.next(); removeStoredBlock(block, node); final Block b = getBlockOnStorage(block, storageInfo); if (b != null) { @@ -1908,7 +1868,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b, // stale storage due to failover or any other reason. 
corruptReplicas.removeFromCorruptReplicasMap(b.getStored(), node); BlockInfoStriped blk = (BlockInfoStriped) getStoredBlock(b.getStored()); - blk.removeStorage(storageInfo); + storageInfo.removeBlock(blk); } // the block is over-replicated so invalidate the replicas immediately invalidateBlock(b, node, numberOfReplicas); @@ -2824,7 +2784,7 @@ public boolean processReport(final DatanodeID nodeID, // Block reports for provided storage are not // maintained by DN heartbeats if (!StorageType.PROVIDED.equals(storageInfo.getStorageType())) { - invalidatedBlocks = processReport(storageInfo, newReport, context); + invalidatedBlocks = processReport(storageInfo, newReport); } } storageInfo.receivedBlockReport(); @@ -2921,8 +2881,7 @@ void rescanPostponedMisreplicatedBlocks() { Collection processReport( final DatanodeStorageInfo storageInfo, - final BlockListAsLongs report, - BlockReportContext context) throws IOException { + final BlockListAsLongs report) throws IOException { // Normal case: // Modify the (block-->datanode) map, according to the difference // between the old and new block report. @@ -2932,36 +2891,8 @@ Collection processReport( Collection toInvalidate = new ArrayList<>(); Collection toCorrupt = new ArrayList<>(); Collection toUC = new ArrayList<>(); - - boolean sorted = false; - String strBlockReportId = ""; - if (context != null) { - sorted = context.isSorted(); - strBlockReportId = Long.toHexString(context.getReportId()); - } - - Iterable sortedReport; - if (!sorted) { - blockLog.warn("BLOCK* processReport 0x{}: Report from the DataNode ({}) " - + "is unsorted. This will cause overhead on the NameNode " - + "which needs to sort the Full BR. Please update the " - + "DataNode to the same version of Hadoop HDFS as the " - + "NameNode ({}).", - strBlockReportId, - storageInfo.getDatanodeDescriptor().getDatanodeUuid(), - VersionInfo.getVersion()); - Set set = new FoldedTreeSet<>(); - for (BlockReportReplica iblk : report) { - set.add(new BlockReportReplica(iblk)); - } - sortedReport = set; - } else { - sortedReport = report; - } - - reportDiffSorted(storageInfo, sortedReport, - toAdd, toRemove, toInvalidate, toCorrupt, toUC); - + reportDiff(storageInfo, report, + toAdd, toRemove, toInvalidate, toCorrupt, toUC); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Process the blocks on each queue @@ -2978,8 +2909,8 @@ Collection processReport( numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { - blockLog.info("BLOCK* processReport 0x{}: logged info for {} of {} " + - "reported.", strBlockReportId, maxNumBlocksToLog, numBlocksLogged); + blockLog.info("BLOCK* processReport: logged info for {} of {} " + + "reported.", maxNumBlocksToLog, numBlocksLogged); } for (Block b : toInvalidate) { addToInvalidates(b, node); @@ -3111,106 +3042,127 @@ void processFirstBlockReport( } } - private void reportDiffSorted(DatanodeStorageInfo storageInfo, - Iterable newReport, + private void reportDiff(DatanodeStorageInfo storageInfo, + BlockListAsLongs newReport, Collection toAdd, // add to DatanodeDescriptor Collection toRemove, // remove from DatanodeDescriptor Collection toInvalidate, // should be removed from DN Collection toCorrupt, // add to corrupt replicas list Collection toUC) { // add to under-construction list - // The blocks must be sorted and the storagenodes blocks must be sorted - Iterator storageBlocksIterator = storageInfo.getBlockIterator(); + // place a delimiter in the list which separates blocks + // that have been reported from those that have not 
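The comment above describes the delimiter technique that the reportDiff body below implements: a dummy block is placed at the head of the storage's block list, every reported block that is found is moved in front of it, and whatever still trails the delimiter after the scan was never reported and can be collected for removal. The toy sketch below shows the idea on a java.util.LinkedList of ids; it is a simplification, not the Hadoop implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class DelimiterDiffSketch {
  public static void main(String[] args) {
    // Blocks currently recorded for a storage.
    LinkedList<String> stored =
        new LinkedList<>(Arrays.asList("b1", "b2", "b3", "b4"));
    // Blocks named in the incoming report.
    List<String> reported = Arrays.asList("b2", "b4");

    final String delimiter = "DELIMITER";
    stored.addFirst(delimiter);

    // Move every reported block that is already known in front of the delimiter.
    for (String b : reported) {
      if (stored.remove(b)) {
        stored.addFirst(b);
      }
    }

    // Everything after the delimiter was never reported.
    List<String> toRemove = new ArrayList<>(
        stored.subList(stored.indexOf(delimiter) + 1, stored.size()));
    stored.remove(delimiter);
    System.out.println("not reported: " + toRemove);  // [b1, b3]
  }
}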
DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor(); - BlockInfo storageBlock = null; - - for (BlockReportReplica replica : newReport) { - - long replicaID = replica.getBlockId(); - if (BlockIdManager.isStripedBlockID(replicaID) - && (!hasNonEcBlockUsingStripedID || - !blocksMap.containsBlock(replica))) { - replicaID = BlockIdManager.convertToStripedID(replicaID); - } - - ReplicaState reportedState = replica.getState(); - - LOG.debug("Reported block {} on {} size {} replicaState = {}", - replica, dn, replica.getNumBytes(), reportedState); - - if (shouldPostponeBlocksFromFuture - && isGenStampInFuture(replica)) { - queueReportedBlock(storageInfo, replica, reportedState, - QUEUE_REASON_FUTURE_GENSTAMP); - continue; - } - - if (storageBlock == null && storageBlocksIterator.hasNext()) { - storageBlock = storageBlocksIterator.next(); - } - - do { - int cmp; - if (storageBlock == null || - (cmp = Long.compare(replicaID, storageBlock.getBlockId())) < 0) { - // Check if block is available in NN but not yet on this storage - BlockInfo nnBlock = blocksMap.getStoredBlock(new Block(replicaID)); - if (nnBlock != null) { - reportDiffSortedInner(storageInfo, replica, reportedState, - nnBlock, toAdd, toCorrupt, toUC); - } else { - // Replica not found anywhere so it should be invalidated - toInvalidate.add(new Block(replica)); - } - break; - } else if (cmp == 0) { - // Replica matched current storageblock - reportDiffSortedInner(storageInfo, replica, reportedState, - storageBlock, toAdd, toCorrupt, toUC); - storageBlock = null; - } else { - // replica has higher ID than storedBlock - // Remove all stored blocks with IDs lower than replica - do { - toRemove.add(storageBlock); - storageBlock = storageBlocksIterator.hasNext() - ? storageBlocksIterator.next() : null; - } while (storageBlock != null && - Long.compare(replicaID, storageBlock.getBlockId()) > 0); + Block delimiterBlock = new Block(); + BlockInfo delimiter = new BlockInfoContiguous(delimiterBlock, + (short) 1); + AddBlockResult result = storageInfo.addBlock(delimiter, delimiterBlock); + assert result == AddBlockResult.ADDED + : "Delimiting block cannot be present in the node"; + int headIndex = 0; //currently the delimiter is in the head of the list + int curIndex; + + if (newReport == null) { + newReport = BlockListAsLongs.EMPTY; + } + // scan the report and process newly reported blocks + for (BlockReportReplica iblk : newReport) { + ReplicaState iState = iblk.getState(); + LOG.debug("Reported block {} on {} size {} replicaState = {}", iblk, dn, + iblk.getNumBytes(), iState); + BlockInfo storedBlock = processReportedBlock(storageInfo, + iblk, iState, toAdd, toInvalidate, toCorrupt, toUC); + + // move block to the head of the list + if (storedBlock != null) { + curIndex = storedBlock.findStorageInfo(storageInfo); + if (curIndex >= 0) { + headIndex = + storageInfo.moveBlockToHead(storedBlock, curIndex, headIndex); } - } while (storageBlock != null); + } } - // Iterate any remaining blocks that have not been reported and remove them - while (storageBlocksIterator.hasNext()) { - toRemove.add(storageBlocksIterator.next()); + // collect blocks that have not been reported + // all of them are next to the delimiter + Iterator it = + storageInfo.new BlockIterator(delimiter.getNext(0)); + while (it.hasNext()) { + toRemove.add(it.next()); } + storageInfo.removeBlock(delimiter); } - private void reportDiffSortedInner( + /** + * Process a block replica reported by the data-node. + * No side effects except adding to the passed-in Collections. + * + *
+ * 1. If the block is not known to the system (not in blocksMap) then the
+ * data-node should be notified to invalidate this block.
+ * 2. If the reported replica is valid, that is, it has the same generation
+ * stamp and length as recorded on the name-node, then the replica location
+ * should be added to the name-node.
+ * 3. If the reported replica is not valid, then it is marked as corrupt,
+ * which triggers replication of the existing valid replicas.
+ * Corrupt replicas are removed from the system when the block
+ * is fully replicated.
+ * 4. If the reported replica is for a block currently marked "under
+ * construction" in the NN, then it should be added to the
+ * BlockUnderConstructionFeature's list of replicas.
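The four cases above correspond to the toInvalidate, toAdd, toCorrupt and toUC collections that processReportedBlock fills in. A condensed, hypothetical sketch of that classification follows; the Stored/Reported types and the generation-stamp-only corruption check are simplifications of the real BlockManager logic.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReportedBlockTriage {
  enum Verdict { INVALIDATE, CORRUPT, UNDER_CONSTRUCTION, ADD }

  // Simplified stand-in for the NameNode's stored state of a block.
  static class Stored {
    final long genStamp;
    final boolean underConstruction;
    Stored(long genStamp, boolean underConstruction) {
      this.genStamp = genStamp;
      this.underConstruction = underConstruction;
    }
  }

  // Simplified stand-in for a replica named in a DataNode block report.
  static class Reported {
    final String id;
    final long genStamp;
    Reported(String id, long genStamp) {
      this.id = id;
      this.genStamp = genStamp;
    }
  }

  static Verdict triage(Map<String, Stored> blocksMap, Reported r) {
    Stored stored = blocksMap.get(r.id);
    if (stored == null) {
      return Verdict.INVALIDATE;          // case 1: block unknown to the NN
    }
    if (stored.genStamp != r.genStamp) {
      return Verdict.CORRUPT;             // case 3: stale or damaged replica
    }
    if (stored.underConstruction) {
      return Verdict.UNDER_CONSTRUCTION;  // case 4: block still being written
    }
    return Verdict.ADD;                   // case 2: record the replica location
  }

  public static void main(String[] args) {
    Map<String, Stored> blocksMap = new HashMap<>();
    blocksMap.put("blk_1", new Stored(100L, false));
    blocksMap.put("blk_2", new Stored(100L, true));
    List<Reported> report = Arrays.asList(
        new Reported("blk_1", 100L),   // -> ADD
        new Reported("blk_1", 99L),    // -> CORRUPT
        new Reported("blk_2", 100L),   // -> UNDER_CONSTRUCTION
        new Reported("blk_9", 100L));  // -> INVALIDATE
    for (Reported r : report) {
      System.out.println(r.id + " -> " + triage(blocksMap, r));
    }
  }
}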
    + * + * @param storageInfo DatanodeStorageInfo that sent the report. + * @param block reported block replica + * @param reportedState reported replica state + * @param toAdd add to DatanodeDescriptor + * @param toInvalidate missing blocks (not in the blocks map) + * should be removed from the data-node + * @param toCorrupt replicas with unexpected length or generation stamp; + * add to corrupt replicas + * @param toUC replicas of blocks currently under construction + * @return the up-to-date stored block, if it should be kept. + * Otherwise, null. + */ + private BlockInfo processReportedBlock( final DatanodeStorageInfo storageInfo, - final BlockReportReplica replica, final ReplicaState reportedState, - final BlockInfo storedBlock, + final Block block, final ReplicaState reportedState, final Collection toAdd, + final Collection toInvalidate, final Collection toCorrupt, final Collection toUC) { - assert replica != null; - assert storedBlock != null; - DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor(); + + LOG.debug("Reported block {} on {} size {} replicaState = {}", block, dn, + block.getNumBytes(), reportedState); + + if (shouldPostponeBlocksFromFuture && isGenStampInFuture(block)) { + queueReportedBlock(storageInfo, block, reportedState, + QUEUE_REASON_FUTURE_GENSTAMP); + return null; + } + + // find block by blockId + BlockInfo storedBlock = getStoredBlock(block); + if(storedBlock == null) { + // If blocksMap does not contain reported block id, + // the replica should be removed from the data-node. + toInvalidate.add(new Block(block)); + return null; + } BlockUCState ucState = storedBlock.getBlockUCState(); // Block is on the NN LOG.debug("In memory blockUCState = {}", ucState); // Ignore replicas already scheduled to be removed from the DN - if (invalidateBlocks.contains(dn, replica)) { - return; + if(invalidateBlocks.contains(dn, block)) { + return storedBlock; } - BlockToMarkCorrupt c = checkReplicaCorrupt(replica, reportedState, - storedBlock, ucState, dn); + BlockToMarkCorrupt c = checkReplicaCorrupt( + block, reportedState, storedBlock, ucState, dn); if (c != null) { if (shouldPostponeBlocksFromFuture) { // If the block is an out-of-date generation stamp or state, @@ -3220,21 +3172,28 @@ private void reportDiffSortedInner( // comes from the IBR / FBR and hence what we should use to compare // against the memory state. // See HDFS-6289 and HDFS-15422 for more context. - queueReportedBlock(storageInfo, replica, reportedState, + queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { toCorrupt.add(c); } - } else if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { - toUC.add(new StatefulBlockInfo(storedBlock, new Block(replica), - reportedState)); - } else if (reportedState == ReplicaState.FINALIZED && - (storedBlock.findStorageInfo(storageInfo) == -1 || - corruptReplicas.isReplicaCorrupt(storedBlock, dn))) { - // Add replica if appropriate. If the replica was previously corrupt - // but now okay, it might need to be updated. - toAdd.add(new BlockInfoToAdd(storedBlock, new Block(replica))); + return storedBlock; } + + if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { + toUC.add(new StatefulBlockInfo(storedBlock, + new Block(block), reportedState)); + return storedBlock; + } + + // Add replica if appropriate. If the replica was previously corrupt + // but now okay, it might need to be updated. 
+ if (reportedState == ReplicaState.FINALIZED + && (storedBlock.findStorageInfo(storageInfo) == -1 || + corruptReplicas.isReplicaCorrupt(storedBlock, dn))) { + toAdd.add(new BlockInfoToAdd(storedBlock, new Block(block))); + } + return storedBlock; } /** @@ -3477,7 +3436,7 @@ private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported, } // just add it - AddBlockResult result = storageInfo.addBlockInitial(storedBlock, reported); + AddBlockResult result = storageInfo.addBlock(storedBlock, reported); // Now check for completion of blocks and safe block count int numCurrentReplica = countLiveNodes(storedBlock); @@ -4242,6 +4201,12 @@ private boolean processAndHandleReportedBlock( DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, DatanodeDescriptor delHintNode) throws IOException { + // blockReceived reports a finalized block + Collection toAdd = new LinkedList<>(); + Collection toInvalidate = new LinkedList(); + Collection toCorrupt = + new LinkedList(); + Collection toUC = new LinkedList(); final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); @@ -4255,58 +4220,33 @@ private boolean processAndHandleReportedBlock( return false; } - // find block by blockId - BlockInfo storedBlock = getStoredBlock(block); - if(storedBlock == null) { - // If blocksMap does not contain reported block id, - // the replica should be removed from the data-node. - blockLog.debug("BLOCK* addBlock: block {} on node {} size {} does not " + - "belong to any file", block, node, block.getNumBytes()); - addToInvalidates(new Block(block), node); - return true; - } - - BlockUCState ucState = storedBlock.getBlockUCState(); - // Block is on the NN - LOG.debug("In memory blockUCState = {}", ucState); + processReportedBlock(storageInfo, block, reportedState, toAdd, toInvalidate, + toCorrupt, toUC); + // the block is only in one of the to-do lists + // if it is in none then data-node already has it + assert toUC.size() + toAdd.size() + toInvalidate.size() + toCorrupt + .size() <= 1 : "The block should be only in one of the lists."; - // Ignore replicas already scheduled to be removed from the DN - if(invalidateBlocks.contains(node, block)) { - return true; + for (StatefulBlockInfo b : toUC) { + addStoredBlockUnderConstruction(b, storageInfo); } - - BlockToMarkCorrupt c = checkReplicaCorrupt( - block, reportedState, storedBlock, ucState, node); - if (c != null) { - if (shouldPostponeBlocksFromFuture) { - // If the block is an out-of-date generation stamp or state, - // but we're the standby, we shouldn't treat it as corrupt, - // but instead just queue it for later processing. - // Storing the reported block for later processing, as that is what - // comes from the IBR / FBR and hence what we should use to compare - // against the memory state. - // See HDFS-6289 and HDFS-15422 for more context. 
- queueReportedBlock(storageInfo, block, reportedState, - QUEUE_REASON_CORRUPT_STATE); - } else { - markBlockAsCorrupt(c, storageInfo, node); - } - return true; + long numBlocksLogged = 0; + for (BlockInfoToAdd b : toAdd) { + addStoredBlock(b.stored, b.reported, storageInfo, delHintNode, + numBlocksLogged < maxNumBlocksToLog); + numBlocksLogged++; } - - if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { - addStoredBlockUnderConstruction( - new StatefulBlockInfo(storedBlock, new Block(block), reportedState), - storageInfo); - return true; + if (numBlocksLogged > maxNumBlocksToLog) { + blockLog.debug("BLOCK* addBlock: logged info for {} of {} reported.", + maxNumBlocksToLog, numBlocksLogged); } - - // Add replica if appropriate. If the replica was previously corrupt - // but now okay, it might need to be updated. - if (reportedState == ReplicaState.FINALIZED - && (storedBlock.findStorageInfo(storageInfo) == -1 || - corruptReplicas.isReplicaCorrupt(storedBlock, node))) { - addStoredBlock(storedBlock, block, storageInfo, delHintNode, true); + for (Block b : toInvalidate) { + blockLog.debug("BLOCK* addBlock: block {} on node {} size {} does not " + + "belong to any file", b, node, b.getNumBytes()); + addToInvalidates(b, node); + } + for (BlockToMarkCorrupt b : toCorrupt) { + markBlockAsCorrupt(b, storageInfo, node); } return true; } @@ -5006,91 +4946,6 @@ public void run() { } } - /** - * Runnable that monitors the fragmentation of the StorageInfo TreeSet and - * compacts it when it falls under a certain threshold. - */ - private class StorageInfoDefragmenter implements Runnable { - - @Override - public void run() { - while (namesystem.isRunning()) { - try { - // Check storage efficiency only when active NN is out of safe mode. - if (isPopulatingReplQueues()) { - scanAndCompactStorages(); - } - Thread.sleep(storageInfoDefragmentInterval); - } catch (Throwable t) { - if (!namesystem.isRunning()) { - LOG.info("Stopping thread."); - if (!(t instanceof InterruptedException)) { - LOG.info("Received an exception while shutting down.", t); - } - break; - } else if (!checkNSRunning && t instanceof InterruptedException) { - LOG.info("Stopping for testing."); - break; - } - LOG.error("Thread received Runtime exception.", t); - terminate(1, t); - } - } - } - - private void scanAndCompactStorages() throws InterruptedException { - ArrayList datanodesAndStorages = new ArrayList<>(); - for (DatanodeDescriptor node - : datanodeManager.getDatanodeListForReport(DatanodeReportType.ALL)) { - for (DatanodeStorageInfo storage : node.getStorageInfos()) { - try { - namesystem.readLock(); - double ratio = storage.treeSetFillRatio(); - if (ratio < storageInfoDefragmentRatio) { - datanodesAndStorages.add(node.getDatanodeUuid()); - datanodesAndStorages.add(storage.getStorageID()); - } - LOG.debug("StorageInfo TreeSet fill ratio {} : {}{}", - storage.getStorageID(), ratio, - (ratio < storageInfoDefragmentRatio) - ? " (queued for defragmentation)" : ""); - } finally { - namesystem.readUnlock(); - } - } - } - if (!datanodesAndStorages.isEmpty()) { - for (int i = 0; i < datanodesAndStorages.size(); i += 2) { - namesystem.writeLock(); - try { - final DatanodeDescriptor dn = datanodeManager. - getDatanode(datanodesAndStorages.get(i)); - if (dn == null) { - continue; - } - final DatanodeStorageInfo storage = dn. 
- getStorageInfo(datanodesAndStorages.get(i + 1)); - if (storage != null) { - boolean aborted = - !storage.treeSetCompact(storageInfoDefragmentTimeout); - if (aborted) { - // Compaction timed out, reset iterator to continue with - // the same storage next iteration. - i -= 2; - } - LOG.info("StorageInfo TreeSet defragmented {} : {}{}", - storage.getStorageID(), storage.treeSetFillRatio(), - aborted ? " (aborted)" : ""); - } - } finally { - namesystem.writeUnlock(); - } - // Wait between each iteration - Thread.sleep(1000); - } - } - } - } /** * Compute block replication and block invalidation work that can be scheduled diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java index a96c815b0069b..9deeb41a55500 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import java.util.Collections; import java.util.Iterator; import java.util.concurrent.atomic.LongAdder; @@ -32,6 +31,37 @@ * the datanodes that store the block. */ class BlocksMap { + public static class StorageIterator implements Iterator { + private final BlockInfo blockInfo; + private int nextIdx = 0; + + StorageIterator(BlockInfo blkInfo) { + this.blockInfo = blkInfo; + } + + @Override + public boolean hasNext() { + if (blockInfo == null) { + return false; + } + while (nextIdx < blockInfo.getCapacity() && + blockInfo.getDatanode(nextIdx) == null) { + // note that for striped blocks there may be null in the triplets + nextIdx++; + } + return nextIdx < blockInfo.getCapacity(); + } + + @Override + public DatanodeStorageInfo next() { + return blockInfo.getStorageInfo(nextIdx++); + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Sorry. can't remove."); + } + } /** Constant {@link LightWeightGSet} capacity. */ private final int capacity; @@ -111,16 +141,6 @@ void removeBlock(BlockInfo block) { } } - /** - * Check if BlocksMap contains the block. - * - * @param b Block to check - * @return true if block is in the map, otherwise false - */ - boolean containsBlock(Block b) { - return blocks.contains(b); - } - /** Returns the block object if it exists in the map. */ BlockInfo getStoredBlock(Block b) { return blocks.get(b); @@ -131,9 +151,7 @@ BlockInfo getStoredBlock(Block b) { * returns {@link Iterable} of the storages the block belongs to. */ Iterable getStorages(Block b) { - BlockInfo block = blocks.get(b); - return block != null ? getStorages(block) - : Collections.emptyList(); + return getStorages(blocks.get(b)); } /** @@ -141,16 +159,12 @@ Iterable getStorages(Block b) { * returns {@link Iterable} of the storages the block belongs to. */ Iterable getStorages(final BlockInfo storedBlock) { - if (storedBlock == null) { - return Collections.emptyList(); - } else { - return new Iterable() { - @Override - public Iterator iterator() { - return storedBlock.getStorageInfos(); - } - }; - } + return new Iterable() { + @Override + public Iterator iterator() { + return new StorageIterator(storedBlock); + } + }; } /** counts number of containing nodes. Better than using iterator. 
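The BlocksMap.StorageIterator added above walks a block's fixed-capacity storage slots and skips null entries (striped blocks can leave holes in the triplets). The sketch below shows the same skip-null iteration pattern over a plain array; SkipNullIterator is a hypothetical illustration, not the Hadoop class.

import java.util.Iterator;
import java.util.NoSuchElementException;

// Illustrative sketch of iterating a fixed-capacity array while skipping nulls.
public class SkipNullIterator<T> implements Iterator<T> {
  private final T[] slots;
  private int nextIdx = 0;

  public SkipNullIterator(T[] slots) {
    this.slots = slots;
  }

  @Override
  public boolean hasNext() {
    while (nextIdx < slots.length && slots[nextIdx] == null) {
      nextIdx++;
    }
    return nextIdx < slots.length;
  }

  @Override
  public T next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return slots[nextIdx++];
  }

  public static void main(String[] args) {
    String[] slots = {"s1", null, "s2", null};
    Iterator<String> it = new SkipNullIterator<>(slots);
    while (it.hasNext()) {
      System.out.println(it.next());  // prints s1 then s2
    }
  }
}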
*/ @@ -169,7 +183,7 @@ boolean removeNode(Block b, DatanodeDescriptor node) { if (info == null) return false; - // remove block from the data-node set and the node from the block info + // remove block from the data-node list and the node from the block info boolean removed = removeBlock(node, info); if (info.hasNoStorage() // no datanodes left @@ -181,7 +195,7 @@ boolean removeNode(Block b, DatanodeDescriptor node) { } /** - * Remove block from the set of blocks belonging to the data-node. Remove + * Remove block from the list of blocks belonging to the data-node. Remove * data-node from the block. */ static boolean removeBlock(DatanodeDescriptor dn, BlockInfo b) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index 8e46a2682409a..188ea11f7e646 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -28,7 +27,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; import org.apache.hadoop.hdfs.server.protocol.StorageReport; -import org.apache.hadoop.hdfs.util.FoldedTreeSet; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @@ -87,6 +85,32 @@ public void updateFromStorage(DatanodeStorage storage) { storageType = storage.getStorageType(); } + /** + * Iterates over the list of blocks belonging to the data-node. + */ + class BlockIterator implements Iterator { + private BlockInfo current; + + BlockIterator(BlockInfo head) { + this.current = head; + } + + public boolean hasNext() { + return current != null; + } + + public BlockInfo next() { + BlockInfo res = current; + current = + current.getNext(current.findStorageInfo(DatanodeStorageInfo.this)); + return res; + } + + public void remove() { + throw new UnsupportedOperationException("Sorry. can't remove."); + } + } + private final DatanodeDescriptor dn; private final String storageID; private StorageType storageType; @@ -98,7 +122,8 @@ public void updateFromStorage(DatanodeStorage storage) { private volatile long remaining; private long blockPoolUsed; - private final FoldedTreeSet blocks = new FoldedTreeSet<>(); + private volatile BlockInfo blockList = null; + private int numBlocks = 0; /** The number of block reports received */ private int blockReportCount = 0; @@ -182,7 +207,7 @@ void setHeartbeatedSinceFailover(boolean value) { } boolean areBlocksOnFailedStorage() { - return getState() == State.FAILED && !blocks.isEmpty(); + return getState() == State.FAILED && numBlocks != 0; } @VisibleForTesting @@ -213,36 +238,6 @@ long getRemaining() { long getBlockPoolUsed() { return blockPoolUsed; } - /** - * For use during startup. 
Expects block to be added in sorted order - * to enable fast insert in to the DatanodeStorageInfo - * - * @param b Block to add to DatanodeStorageInfo - * @param reportedBlock The reported replica - * @return Enum describing if block was added, replaced or already existed - */ - public AddBlockResult addBlockInitial(BlockInfo b, Block reportedBlock) { - // First check whether the block belongs to a different storage - // on the same DN. - AddBlockResult result = AddBlockResult.ADDED; - DatanodeStorageInfo otherStorage = - b.findStorageInfo(getDatanodeDescriptor()); - - if (otherStorage != null) { - if (otherStorage != this) { - // The block belongs to a different storage. Remove it first. - otherStorage.removeBlock(b); - result = AddBlockResult.REPLACED; - } else { - // The block is already associated with this storage. - return AddBlockResult.ALREADY_EXIST; - } - } - - b.addStorage(this, reportedBlock); - blocks.addSortedLast(b); - return result; - } public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { // First check whether the block belongs to a different storage @@ -262,8 +257,9 @@ public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) { } } + // add to the head of the data-node list b.addStorage(this, reportedBlock); - blocks.add(b); + insertToList(b); return result; } @@ -271,21 +267,45 @@ AddBlockResult addBlock(BlockInfo b) { return addBlock(b, b); } + public void insertToList(BlockInfo b) { + blockList = b.listInsert(blockList, this); + numBlocks++; + } boolean removeBlock(BlockInfo b) { - blocks.remove(b); - return b.removeStorage(this); + blockList = b.listRemove(blockList, this); + if (b.removeStorage(this)) { + numBlocks--; + return true; + } else { + return false; + } } int numBlocks() { - return blocks.size(); + return numBlocks; } - + + Iterator getBlockIterator() { + return new BlockIterator(blockList); + } + /** - * @return iterator to an unmodifiable set of blocks - * related to this {@link DatanodeStorageInfo} + * Move block to the head of the list of blocks belonging to the data-node. + * @return the index of the head of the blockList */ - Iterator getBlockIterator() { - return Collections.unmodifiableSet(blocks).iterator(); + int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) { + blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex); + return curIndex; + } + + + /** + * Used for testing only. + * @return the head of the blockList + */ + @VisibleForTesting + BlockInfo getBlockListHeadForTesting(){ + return blockList; } void updateState(StorageReport r) { @@ -344,27 +364,6 @@ StorageReport toStorageReport() { false, capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed); } - /** - * The fill ratio of the underlying TreeSet holding blocks. - * - * @return the fill ratio of the tree - */ - public double treeSetFillRatio() { - return blocks.fillRatio(); - } - - /** - * Compact the underlying TreeSet holding blocks. - * - * @param timeout Maximum time to spend compacting the tree set in - * milliseconds. 
- * - * @return true if compaction completed, false if aborted - */ - public boolean treeSetCompact(long timeout) { - return blocks.compact(timeout); - } - static Iterable toStorageTypes( final Iterable infos) { return new Iterable() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 2d24c0f680e1b..5d3b1ba2556f7 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -413,7 +413,7 @@ List blockReport(long fullBrLeaseId) throws IOException { // Below split threshold, send all reports in a single message. DatanodeCommand cmd = bpNamenode.blockReport( bpRegistration, bpos.getBlockPoolId(), reports, - new BlockReportContext(1, 0, reportId, fullBrLeaseId, true)); + new BlockReportContext(1, 0, reportId, fullBrLeaseId)); blockReportSizes.add( calculateBlockReportPBSize(useBlocksBuffer, reports)); numRPCs = 1; @@ -428,7 +428,7 @@ List blockReport(long fullBrLeaseId) throws IOException { DatanodeCommand cmd = bpNamenode.blockReport( bpRegistration, bpos.getBlockPoolId(), singleReport, new BlockReportContext(reports.length, r, reportId, - fullBrLeaseId, true)); + fullBrLeaseId)); blockReportSizes.add( calculateBlockReportPBSize(useBlocksBuffer, singleReport)); numReportsSent++; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 7a8569d39008e..eef1732ac4426 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -515,8 +515,8 @@ private void scan() { Collection diffRecord = new ArrayList<>(); statsRecord.totalBlocks = blockpoolReport.size(); - final List bl; - bl = dataset.getSortedFinalizedBlocks(bpid); + final List bl = dataset.getFinalizedBlocks(bpid); + Collections.sort(bl); // Sort based on blockId int d = 0; // index for blockpoolReport int m = 0; // index for memReprot diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 75368478ce5ed..f162ea1b3ae15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -238,17 +238,16 @@ StorageReport[] getStorageReports(String bpid) VolumeFailureSummary getVolumeFailureSummary(); /** - * Gets a sorted list of references to the finalized blocks for the given - * block pool. The list is sorted by blockID. + * Gets a list of references to the finalized blocks for the given block pool. *
    * Callers of this function should call * {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being * changed during list iteration. *
    * @return a list of references to the finalized blocks for the given block - * pool. The list is sorted by blockID. + * pool. */ - List getSortedFinalizedBlocks(String bpid); + List getFinalizedBlocks(String bpid); /** * Check whether the in-memory block record matches the block on the disk, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 3a4c3b09191e5..fb5d11e66c7c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -2148,18 +2148,17 @@ public Map getBlockReports(String bpid) { } /** - * Gets a list of references to the finalized blocks for the given block pool, - * sorted by blockID. + * Gets a list of references to the finalized blocks for the given block pool. *
    * Callers of this function should call * {@link FsDatasetSpi#acquireDatasetLock()} to avoid blocks' status being * changed during list iteration. *
    * @return a list of references to the finalized blocks for the given block - * pool. The list is sorted by blockID. + * pool. */ @Override - public List getSortedFinalizedBlocks(String bpid) { + public List getFinalizedBlocks(String bpid) { try (AutoCloseableLock lock = datasetReadLock.acquire()) { final List finalized = new ArrayList( volumeMap.size(bpid)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java index 5dfcc77174cd0..c1d103ed50dba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import java.util.Collection; -import java.util.Comparator; import java.util.HashMap; import java.util.Map; import java.util.concurrent.locks.ReadWriteLock; @@ -26,7 +25,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; -import org.apache.hadoop.hdfs.util.FoldedTreeSet; +import org.apache.hadoop.util.LightWeightResizableGSet; import org.apache.hadoop.util.AutoCloseableLock; /** @@ -37,20 +36,9 @@ class ReplicaMap { private final AutoCloseableLock readLock; private final AutoCloseableLock writeLock; - // Map of block pool Id to a set of ReplicaInfo. - private final Map> map = new HashMap<>(); - - // Special comparator used to compare Long to Block ID in the TreeSet. - private static final Comparator LONG_AND_BLOCK_COMPARATOR - = new Comparator() { - - @Override - public int compare(Object o1, Object o2) { - long lookup = (long) o1; - long stored = ((Block) o2).getBlockId(); - return lookup > stored ? 1 : lookup < stored ? -1 : 0; - } - }; + // Map of block pool Id to another map of block Id to ReplicaInfo. + private final Map> map = + new HashMap<>(); ReplicaMap(AutoCloseableLock readLock, AutoCloseableLock writeLock) { if (readLock == null || writeLock == null) { @@ -113,11 +101,8 @@ ReplicaInfo get(String bpid, Block block) { ReplicaInfo get(String bpid, long blockId) { checkBlockPool(bpid); try (AutoCloseableLock l = readLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - if (set == null) { - return null; - } - return set.get(blockId, LONG_AND_BLOCK_COMPARATOR); + LightWeightResizableGSet m = map.get(bpid); + return m != null ? 
m.get(new Block(blockId)) : null; } } @@ -133,13 +118,13 @@ ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) { checkBlockPool(bpid); checkBlock(replicaInfo); try (AutoCloseableLock l = writeLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - if (set == null) { + LightWeightResizableGSet m = map.get(bpid); + if (m == null) { // Add an entry for block pool if it does not exist already - set = new FoldedTreeSet<>(); - map.put(bpid, set); + m = new LightWeightResizableGSet(); + map.put(bpid, m); } - return set.addOrReplace(replicaInfo); + return m.put(replicaInfo); } } @@ -151,18 +136,17 @@ ReplicaInfo addAndGet(String bpid, ReplicaInfo replicaInfo) { checkBlockPool(bpid); checkBlock(replicaInfo); try (AutoCloseableLock l = writeLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - if (set == null) { + LightWeightResizableGSet m = map.get(bpid); + if (m == null) { // Add an entry for block pool if it does not exist already - set = new FoldedTreeSet<>(); - map.put(bpid, set); + m = new LightWeightResizableGSet(); + map.put(bpid, m); } - ReplicaInfo oldReplicaInfo = set.get(replicaInfo.getBlockId(), - LONG_AND_BLOCK_COMPARATOR); + ReplicaInfo oldReplicaInfo = m.get(replicaInfo); if (oldReplicaInfo != null) { return oldReplicaInfo; } else { - set.addOrReplace(replicaInfo); + m.put(replicaInfo); } return replicaInfo; } @@ -201,13 +185,12 @@ ReplicaInfo remove(String bpid, Block block) { checkBlockPool(bpid); checkBlock(block); try (AutoCloseableLock l = writeLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - if (set != null) { - ReplicaInfo replicaInfo = - set.get(block.getBlockId(), LONG_AND_BLOCK_COMPARATOR); + LightWeightResizableGSet m = map.get(bpid); + if (m != null) { + ReplicaInfo replicaInfo = m.get(block); if (replicaInfo != null && block.getGenerationStamp() == replicaInfo.getGenerationStamp()) { - return set.removeAndGet(replicaInfo); + return m.remove(block); } } } @@ -224,9 +207,9 @@ ReplicaInfo remove(String bpid, Block block) { ReplicaInfo remove(String bpid, long blockId) { checkBlockPool(bpid); try (AutoCloseableLock l = writeLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - if (set != null) { - return set.removeAndGet(blockId, LONG_AND_BLOCK_COMPARATOR); + LightWeightResizableGSet m = map.get(bpid); + if (m != null) { + return m.remove(new Block(blockId)); } } return null; @@ -239,8 +222,8 @@ ReplicaInfo remove(String bpid, long blockId) { */ int size(String bpid) { try (AutoCloseableLock l = readLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - return set != null ? set.size() : 0; + LightWeightResizableGSet m = map.get(bpid); + return m != null ? m.size() : 0; } } @@ -255,17 +238,19 @@ int size(String bpid) { * @return a collection of the replicas belonging to the block pool */ Collection replicas(String bpid) { - return map.get(bpid); + LightWeightResizableGSet m = null; + m = map.get(bpid); + return m != null ? 
m.values() : null; } void initBlockPool(String bpid) { checkBlockPool(bpid); try (AutoCloseableLock l = writeLock.acquire()) { - FoldedTreeSet set = map.get(bpid); - if (set == null) { + LightWeightResizableGSet m = map.get(bpid); + if (m == null) { // Add an entry for block pool if it does not exist already - set = new FoldedTreeSet<>(); - map.put(bpid, set); + m = new LightWeightResizableGSet(); + map.put(bpid, m); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6db856b89a20d..32da3c793c35f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1997,7 +1997,12 @@ public BatchedListEntries getFilesBlockingDecom(long prevId, LightWeightHashSet openFileIds = new LightWeightHashSet<>(); for (DatanodeDescriptor dataNode : blockManager.getDatanodeManager().getDatanodes()) { - for (long ucFileId : dataNode.getLeavingServiceStatus().getOpenFiles()) { + // Sort open files + LightWeightHashSet dnOpenFiles = + dataNode.getLeavingServiceStatus().getOpenFiles(); + Long[] dnOpenFileIds = new Long[dnOpenFiles.size()]; + Arrays.sort(dnOpenFiles.toArray(dnOpenFileIds)); + for (Long ucFileId : dnOpenFileIds) { INode ucFile = getFSDirectory().getInode(ucFileId); if (ucFile == null || ucFileId <= prevId || openFileIds.contains(ucFileId)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java index 94749e2d5bd4f..5bcd719b70499 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java @@ -52,16 +52,12 @@ public class BlockReportContext { */ private final long leaseId; - private final boolean sorted; - public BlockReportContext(int totalRpcs, int curRpc, - long reportId, long leaseId, - boolean sorted) { + long reportId, long leaseId) { this.totalRpcs = totalRpcs; this.curRpc = curRpc; this.reportId = reportId; this.leaseId = leaseId; - this.sorted = sorted; } public int getTotalRpcs() { @@ -79,8 +75,4 @@ public long getReportId() { public long getLeaseId() { return leaseId; } - - public boolean isSorted() { - return sorted; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index 5680ef3162ed0..24cd7aa1155fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -140,6 +140,7 @@ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, * Each finalized block is represented as 3 longs. Each under- * construction replica is represented as 4 longs. * This is done instead of Block[] to reduce memory used by block reports. + * @param reports report of blocks per storage * @param context Context information for this block report. 
* * @return - the next command for DN to process. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/FoldedTreeSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/FoldedTreeSet.java deleted file mode 100644 index 1c6be1d629830..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/FoldedTreeSet.java +++ /dev/null @@ -1,1285 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.util; - -import org.apache.hadoop.util.Time; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Comparator; -import java.util.ConcurrentModificationException; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.Objects; -import java.util.SortedSet; - -/** - * A memory efficient implementation of RBTree. Instead of having a Node for - * each entry each node contains an array holding 64 entries. - * - * Based on the Apache Harmony folded TreeMap. - * - * @param Entry type - */ -public class FoldedTreeSet implements SortedSet { - - private static final boolean RED = true; - private static final boolean BLACK = false; - - private final Comparator comparator; - private Node root; - private int size; - private int nodeCount; - private int modCount; - private Node cachedNode; - - /** - * Internal tree node that holds a sorted array of entries. 
- * - * @param type of the elements - */ - private static class Node { - - private static final int NODE_SIZE = 64; - - // Tree structure - private Node parent, left, right; - private boolean color; - private final E[] entries; - private int leftIndex = 0, rightIndex = -1; - private int size = 0; - // List for fast ordered iteration - private Node prev, next; - - @SuppressWarnings("unchecked") - public Node() { - entries = (E[]) new Object[NODE_SIZE]; - } - - public boolean isRed() { - return color == RED; - } - - public boolean isBlack() { - return color == BLACK; - } - - public Node getLeftMostNode() { - Node node = this; - while (node.left != null) { - node = node.left; - } - return node; - } - - public Node getRightMostNode() { - Node node = this; - while (node.right != null) { - node = node.right; - } - return node; - } - - public void addEntryLeft(E entry) { - assert rightIndex < entries.length; - assert !isFull(); - - if (leftIndex == 0) { - rightIndex++; - // Shift entries right/up - System.arraycopy(entries, 0, entries, 1, size); - } else { - leftIndex--; - } - size++; - entries[leftIndex] = entry; - } - - public void addEntryRight(E entry) { - assert !isFull(); - - if (rightIndex == NODE_SIZE - 1) { - assert leftIndex > 0; - // Shift entries left/down - System.arraycopy(entries, leftIndex, entries, --leftIndex, size); - } else { - rightIndex++; - } - size++; - entries[rightIndex] = entry; - } - - public void addEntryAt(E entry, int index) { - assert !isFull(); - - if (leftIndex == 0 || ((rightIndex != Node.NODE_SIZE - 1) - && (rightIndex - index <= index - leftIndex))) { - rightIndex++; - System.arraycopy(entries, index, - entries, index + 1, rightIndex - index); - entries[index] = entry; - } else { - int newLeftIndex = leftIndex - 1; - System.arraycopy(entries, leftIndex, - entries, newLeftIndex, index - leftIndex); - leftIndex = newLeftIndex; - entries[index - 1] = entry; - } - size++; - } - - public void addEntriesLeft(Node from) { - leftIndex -= from.size; - size += from.size; - System.arraycopy(from.entries, from.leftIndex, - entries, leftIndex, from.size); - } - - public void addEntriesRight(Node from) { - System.arraycopy(from.entries, from.leftIndex, - entries, rightIndex + 1, from.size); - size += from.size; - rightIndex += from.size; - } - - public E insertEntrySlideLeft(E entry, int index) { - E pushedEntry = entries[0]; - System.arraycopy(entries, 1, entries, 0, index - 1); - entries[index - 1] = entry; - return pushedEntry; - } - - public E insertEntrySlideRight(E entry, int index) { - E movedEntry = entries[rightIndex]; - System.arraycopy(entries, index, entries, index + 1, rightIndex - index); - entries[index] = entry; - return movedEntry; - } - - public E removeEntryLeft() { - assert !isEmpty(); - E entry = entries[leftIndex]; - entries[leftIndex] = null; - leftIndex++; - size--; - return entry; - } - - public E removeEntryRight() { - assert !isEmpty(); - E entry = entries[rightIndex]; - entries[rightIndex] = null; - rightIndex--; - size--; - return entry; - } - - public E removeEntryAt(int index) { - assert !isEmpty(); - - E entry = entries[index]; - int rightSize = rightIndex - index; - int leftSize = index - leftIndex; - if (rightSize <= leftSize) { - System.arraycopy(entries, index + 1, entries, index, rightSize); - entries[rightIndex] = null; - rightIndex--; - } else { - System.arraycopy(entries, leftIndex, entries, leftIndex + 1, leftSize); - entries[leftIndex] = null; - leftIndex++; - } - size--; - return entry; - } - - public boolean isFull() { - return 
size == NODE_SIZE; - } - - public boolean isEmpty() { - return size == 0; - } - - public void clear() { - if (leftIndex < rightIndex) { - Arrays.fill(entries, leftIndex, rightIndex + 1, null); - } - size = 0; - leftIndex = 0; - rightIndex = -1; - prev = null; - next = null; - parent = null; - left = null; - right = null; - color = BLACK; - } - } - - private static final class TreeSetIterator implements Iterator { - - private final FoldedTreeSet tree; - private int iteratorModCount; - private Node node; - private int index; - private E lastEntry; - private int lastIndex; - private Node lastNode; - - private TreeSetIterator(FoldedTreeSet tree) { - this.tree = tree; - this.iteratorModCount = tree.modCount; - if (!tree.isEmpty()) { - this.node = tree.root.getLeftMostNode(); - this.index = this.node.leftIndex; - } - } - - @Override - public boolean hasNext() { - checkForModification(); - return node != null; - } - - @Override - public E next() { - if (hasNext()) { - lastEntry = node.entries[index]; - lastIndex = index; - lastNode = node; - if (++index > node.rightIndex) { - node = node.next; - if (node != null) { - index = node.leftIndex; - } - } - return lastEntry; - } else { - throw new NoSuchElementException("Iterator exhausted"); - } - } - - @Override - public void remove() { - if (lastEntry == null) { - throw new IllegalStateException("No current element"); - } - checkForModification(); - if (lastNode.size == 1) { - // Safe to remove lastNode, the iterator is on the next node - tree.deleteNode(lastNode); - } else if (lastNode.leftIndex == lastIndex) { - // Safe to remove leftmost entry, the iterator is on the next index - lastNode.removeEntryLeft(); - } else if (lastNode.rightIndex == lastIndex) { - // Safe to remove the rightmost entry, the iterator is on the next node - lastNode.removeEntryRight(); - } else { - // Remove entry in the middle of the array - assert node == lastNode; - int oldRIndex = lastNode.rightIndex; - lastNode.removeEntryAt(lastIndex); - if (oldRIndex > lastNode.rightIndex) { - // Entries moved to the left in the array so index must be reset - index = lastIndex; - } - } - lastEntry = null; - iteratorModCount++; - tree.modCount++; - tree.size--; - } - - private void checkForModification() { - if (iteratorModCount != tree.modCount) { - throw new ConcurrentModificationException("Tree has been modified " - + "outside of iterator"); - } - } - } - - /** - * Create a new TreeSet that uses the natural ordering of objects. The element - * type must implement Comparable. - */ - public FoldedTreeSet() { - this(null); - } - - /** - * Create a new TreeSet that orders the elements using the supplied - * Comparator. - * - * @param comparator Comparator able to compare elements of type E - */ - public FoldedTreeSet(Comparator comparator) { - this.comparator = comparator; - } - - private Node cachedOrNewNode(E entry) { - Node node = (cachedNode != null) ? 
cachedNode : new Node(); - cachedNode = null; - nodeCount++; - // Since BlockIDs are always increasing for new blocks it is best to - // add values on the left side to enable quicker inserts on the right - node.addEntryLeft(entry); - return node; - } - - private void cacheAndClear(Node node) { - if (cachedNode == null) { - node.clear(); - cachedNode = node; - } - } - - @Override - public Comparator comparator() { - return comparator; - } - - @Override - public SortedSet subSet(E fromElement, E toElement) { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public SortedSet headSet(E toElement) { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public SortedSet tailSet(E fromElement) { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public E first() { - if (!isEmpty()) { - Node node = root.getLeftMostNode(); - return node.entries[node.leftIndex]; - } - return null; - } - - @Override - public E last() { - if (!isEmpty()) { - Node node = root.getRightMostNode(); - return node.entries[node.rightIndex]; - } - return null; - } - - @Override - public int size() { - return size; - } - - @Override - public boolean isEmpty() { - return root == null; - } - - /** - * Lookup and return a stored object using a user provided comparator. - * - * @param obj Lookup key - * @param cmp User provided Comparator. The comparator should expect that the - * proved obj will always be the first method parameter and any - * stored object will be the second parameter. - * - * @return A matching stored object or null if non is found - */ - public E get(Object obj, Comparator cmp) { - Objects.requireNonNull(obj); - - Node node = root; - while (node != null) { - E[] entries = node.entries; - - int leftIndex = node.leftIndex; - int result = compare(obj, entries[leftIndex], cmp); - if (result < 0) { - node = node.left; - } else if (result == 0) { - return entries[leftIndex]; - } else { - int rightIndex = node.rightIndex; - if (leftIndex != rightIndex) { - result = compare(obj, entries[rightIndex], cmp); - } - if (result == 0) { - return entries[rightIndex]; - } else if (result > 0) { - node = node.right; - } else { - int low = leftIndex + 1; - int high = rightIndex - 1; - while (low <= high) { - int mid = (low + high) >>> 1; - result = compare(obj, entries[mid], cmp); - if (result > 0) { - low = mid + 1; - } else if (result < 0) { - high = mid - 1; - } else { - return entries[mid]; - } - } - return null; - } - } - } - return null; - } - - /** - * Lookup and return a stored object. - * - * @param entry Lookup entry - * - * @return A matching stored object or null if non is found - */ - public E get(E entry) { - return get(entry, comparator); - } - - @Override - @SuppressWarnings("unchecked") - public boolean contains(Object obj) { - return get((E) obj) != null; - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private static int compare(Object lookup, Object stored, Comparator cmp) { - return cmp != null - ? 
cmp.compare(lookup, stored) - : ((Comparable) lookup).compareTo(stored); - } - - @Override - public Iterator iterator() { - return new TreeSetIterator<>(this); - } - - @Override - public Object[] toArray() { - Object[] objects = new Object[size]; - if (!isEmpty()) { - int pos = 0; - for (Node node = root.getLeftMostNode(); node != null; - pos += node.size, node = node.next) { - System.arraycopy(node.entries, node.leftIndex, objects, pos, node.size); - } - } - return objects; - } - - @Override - @SuppressWarnings("unchecked") - public T[] toArray(T[] a) { - T[] r = a.length >= size ? a - : (T[]) java.lang.reflect.Array - .newInstance(a.getClass().getComponentType(), size); - if (!isEmpty()) { - Node node = root.getLeftMostNode(); - int pos = 0; - while (node != null) { - System.arraycopy(node.entries, node.leftIndex, r, pos, node.size); - pos += node.size; - node = node.next; - } - if (r.length > pos) { - r[pos] = null; - } - } else if (a.length > 0) { - a[0] = null; - } - return r; - } - - /** - * Add or replace an entry in the TreeSet. - * - * @param entry Entry to add or replace/update. - * - * @return the previous entry, or null if this set did not already contain the - * specified entry - */ - public E addOrReplace(E entry) { - return add(entry, true); - } - - @Override - public boolean add(E entry) { - return add(entry, false) == null; - } - - /** - * Internal add method to add a entry to the set. - * - * @param entry Entry to add - * @param replace Should the entry replace an old entry which is equal to the - * new entry - * - * @return null if entry added and didn't exist or the previous value (which - * might not have been overwritten depending on the replace parameter) - */ - private E add(E entry, boolean replace) { - Objects.requireNonNull(entry); - - // Empty tree - if (isEmpty()) { - root = cachedOrNewNode(entry); - size = 1; - modCount++; - return null; - } - - // Compare right entry first since inserts of comperatively larger entries - // is more likely to be inserted. BlockID is always increasing in HDFS. 
- Node node = root; - Node prevNode = null; - int result = 0; - while (node != null) { - prevNode = node; - E[] entries = node.entries; - int rightIndex = node.rightIndex; - result = compare(entry, entries[rightIndex], comparator); - if (result > 0) { - node = node.right; - } else if (result == 0) { - E prevEntry = entries[rightIndex]; - if (replace) { - entries[rightIndex] = entry; - } - return prevEntry; - } else { - int leftIndex = node.leftIndex; - if (leftIndex != rightIndex) { - result = compare(entry, entries[leftIndex], comparator); - } - if (result < 0) { - node = node.left; - } else if (result == 0) { - E prevEntry = entries[leftIndex]; - if (replace) { - entries[leftIndex] = entry; - } - return prevEntry; - } else { - // Insert in this node - int low = leftIndex + 1, high = rightIndex - 1; - while (low <= high) { - int mid = (low + high) >>> 1; - result = compare(entry, entries[mid], comparator); - if (result > 0) { - low = mid + 1; - } else if (result == 0) { - E prevEntry = entries[mid]; - if (replace) { - entries[mid] = entry; - } - return prevEntry; - } else { - high = mid - 1; - } - } - addElementInNode(node, entry, low); - return null; - } - } - } - - assert prevNode != null; - size++; - modCount++; - if (!prevNode.isFull()) { - // The previous node still has space - if (result < 0) { - prevNode.addEntryLeft(entry); - } else { - prevNode.addEntryRight(entry); - } - } else if (result < 0) { - // The previous node is full, add to adjencent node or a new node - if (prevNode.prev != null && !prevNode.prev.isFull()) { - prevNode.prev.addEntryRight(entry); - } else { - attachNodeLeft(prevNode, cachedOrNewNode(entry)); - } - } else if (prevNode.next != null && !prevNode.next.isFull()) { - prevNode.next.addEntryLeft(entry); - } else { - attachNodeRight(prevNode, cachedOrNewNode(entry)); - } - return null; - } - - /** - * Insert an entry last in the sorted tree. The entry must be the considered - * larger than the currently largest entry in the set when doing - * current.compareTo(entry), if entry is not the largest entry the method will - * fall back on the regular add method. 
- * - * @param entry entry to add - * - * @return True if added, false if already existed in the set - */ - public boolean addSortedLast(E entry) { - - if (isEmpty()) { - root = cachedOrNewNode(entry); - size = 1; - modCount++; - return true; - } else { - Node node = root.getRightMostNode(); - if (compare(node.entries[node.rightIndex], entry, comparator) < 0) { - size++; - modCount++; - if (!node.isFull()) { - node.addEntryRight(entry); - } else { - attachNodeRight(node, cachedOrNewNode(entry)); - } - return true; - } - } - - // Fallback on normal add if entry is unsorted - return add(entry); - } - - private void addElementInNode(Node node, E entry, int index) { - size++; - modCount++; - - if (!node.isFull()) { - node.addEntryAt(entry, index); - } else { - // Node is full, insert and push old entry - Node prev = node.prev; - Node next = node.next; - if (prev == null) { - // First check if we have space in the the next node - if (next != null && !next.isFull()) { - E movedEntry = node.insertEntrySlideRight(entry, index); - next.addEntryLeft(movedEntry); - } else { - // Since prev is null the left child must be null - assert node.left == null; - E movedEntry = node.insertEntrySlideLeft(entry, index); - Node newNode = cachedOrNewNode(movedEntry); - attachNodeLeft(node, newNode); - } - } else if (!prev.isFull()) { - // Prev has space - E movedEntry = node.insertEntrySlideLeft(entry, index); - prev.addEntryRight(movedEntry); - } else if (next == null) { - // Since next is null the right child must be null - assert node.right == null; - E movedEntry = node.insertEntrySlideRight(entry, index); - Node newNode = cachedOrNewNode(movedEntry); - attachNodeRight(node, newNode); - } else if (!next.isFull()) { - // Next has space - E movedEntry = node.insertEntrySlideRight(entry, index); - next.addEntryLeft(movedEntry); - } else { - // Both prev and next nodes exist and are full - E movedEntry = node.insertEntrySlideRight(entry, index); - Node newNode = cachedOrNewNode(movedEntry); - if (node.right == null) { - attachNodeRight(node, newNode); - } else { - // Since our right node exist, - // the left node of our next node must be empty - assert next.left == null; - attachNodeLeft(next, newNode); - } - } - } - } - - private void attachNodeLeft(Node node, Node newNode) { - newNode.parent = node; - node.left = newNode; - - newNode.next = node; - newNode.prev = node.prev; - if (newNode.prev != null) { - newNode.prev.next = newNode; - } - node.prev = newNode; - balanceInsert(newNode); - } - - private void attachNodeRight(Node node, Node newNode) { - newNode.parent = node; - node.right = newNode; - - newNode.prev = node; - newNode.next = node.next; - if (newNode.next != null) { - newNode.next.prev = newNode; - } - node.next = newNode; - balanceInsert(newNode); - } - - /** - * Balance the RB Tree after insert. 
- * - * @param node Added node - */ - private void balanceInsert(Node node) { - node.color = RED; - - while (node != root && node.parent.isRed()) { - if (node.parent == node.parent.parent.left) { - Node uncle = node.parent.parent.right; - if (uncle != null && uncle.isRed()) { - node.parent.color = BLACK; - uncle.color = BLACK; - node.parent.parent.color = RED; - node = node.parent.parent; - } else { - if (node == node.parent.right) { - node = node.parent; - rotateLeft(node); - } - node.parent.color = BLACK; - node.parent.parent.color = RED; - rotateRight(node.parent.parent); - } - } else { - Node uncle = node.parent.parent.left; - if (uncle != null && uncle.isRed()) { - node.parent.color = BLACK; - uncle.color = BLACK; - node.parent.parent.color = RED; - node = node.parent.parent; - } else { - if (node == node.parent.left) { - node = node.parent; - rotateRight(node); - } - node.parent.color = BLACK; - node.parent.parent.color = RED; - rotateLeft(node.parent.parent); - } - } - } - root.color = BLACK; - } - - private void rotateRight(Node node) { - Node pivot = node.left; - node.left = pivot.right; - if (pivot.right != null) { - pivot.right.parent = node; - } - pivot.parent = node.parent; - if (node.parent == null) { - root = pivot; - } else if (node == node.parent.right) { - node.parent.right = pivot; - } else { - node.parent.left = pivot; - } - pivot.right = node; - node.parent = pivot; - } - - private void rotateLeft(Node node) { - Node pivot = node.right; - node.right = pivot.left; - if (pivot.left != null) { - pivot.left.parent = node; - } - pivot.parent = node.parent; - if (node.parent == null) { - root = pivot; - } else if (node == node.parent.left) { - node.parent.left = pivot; - } else { - node.parent.right = pivot; - } - pivot.left = node; - node.parent = pivot; - } - - /** - * Remove object using a provided comparator, and return the removed entry. - * - * @param obj Lookup entry - * @param cmp User provided Comparator. The comparator should expect that the - * proved obj will always be the first method parameter and any - * stored object will be the second parameter. - * - * @return The removed entry or null if not found - */ - public E removeAndGet(Object obj, Comparator cmp) { - Objects.requireNonNull(obj); - - if (!isEmpty()) { - Node node = root; - while (node != null) { - E[] entries = node.entries; - int leftIndex = node.leftIndex; - int result = compare(obj, entries[leftIndex], cmp); - if (result < 0) { - node = node.left; - } else if (result == 0) { - return removeElementLeft(node); - } else { - int rightIndex = node.rightIndex; - if (leftIndex != rightIndex) { - result = compare(obj, entries[rightIndex], cmp); - } - if (result == 0) { - return removeElementRight(node); - } else if (result > 0) { - node = node.right; - } else { - int low = leftIndex + 1, high = rightIndex - 1; - while (low <= high) { - int mid = (low + high) >>> 1; - result = compare(obj, entries[mid], cmp); - if (result > 0) { - low = mid + 1; - } else if (result == 0) { - return removeElementAt(node, mid); - } else { - high = mid - 1; - } - } - return null; - } - } - } - } - return null; - } - - /** - * Remove object and return the removed entry. - * - * @param obj Lookup entry - * - * @return The removed entry or null if not found - */ - public E removeAndGet(Object obj) { - return removeAndGet(obj, comparator); - } - - /** - * Remove object using a provided comparator. - * - * @param obj Lookup entry - * @param cmp User provided Comparator. 
The comparator should expect that the - * proved obj will always be the first method parameter and any - * stored object will be the second parameter. - * - * @return True if found and removed, else false - */ - public boolean remove(Object obj, Comparator cmp) { - return removeAndGet(obj, cmp) != null; - } - - @Override - public boolean remove(Object obj) { - return removeAndGet(obj, comparator) != null; - } - - private E removeElementLeft(Node node) { - modCount++; - size--; - E entry = node.removeEntryLeft(); - - if (node.isEmpty()) { - deleteNode(node); - } else if (node.prev != null - && (Node.NODE_SIZE - 1 - node.prev.rightIndex) >= node.size) { - // Remaining entries fit in the prev node, move them and delete this node - node.prev.addEntriesRight(node); - deleteNode(node); - } else if (node.next != null && node.next.leftIndex >= node.size) { - // Remaining entries fit in the next node, move them and delete this node - node.next.addEntriesLeft(node); - deleteNode(node); - } else if (node.prev != null && node.prev.size < node.leftIndex) { - // Entries in prev node will fit in this node, move them and delete prev - node.addEntriesLeft(node.prev); - deleteNode(node.prev); - } - - return entry; - } - - private E removeElementRight(Node node) { - modCount++; - size--; - E entry = node.removeEntryRight(); - - if (node.isEmpty()) { - deleteNode(node); - } else if (node.prev != null - && (Node.NODE_SIZE - 1 - node.prev.rightIndex) >= node.size) { - // Remaining entries fit in the prev node, move them and delete this node - node.prev.addEntriesRight(node); - deleteNode(node); - } else if (node.next != null && node.next.leftIndex >= node.size) { - // Remaining entries fit in the next node, move them and delete this node - node.next.addEntriesLeft(node); - deleteNode(node); - } else if (node.next != null - && node.next.size < (Node.NODE_SIZE - 1 - node.rightIndex)) { - // Entries in next node will fit in this node, move them and delete next - node.addEntriesRight(node.next); - deleteNode(node.next); - } - - return entry; - } - - private E removeElementAt(Node node, int index) { - modCount++; - size--; - E entry = node.removeEntryAt(index); - - if (node.prev != null - && (Node.NODE_SIZE - 1 - node.prev.rightIndex) >= node.size) { - // Remaining entries fit in the prev node, move them and delete this node - node.prev.addEntriesRight(node); - deleteNode(node); - } else if (node.next != null && (node.next.leftIndex) >= node.size) { - // Remaining entries fit in the next node, move them and delete this node - node.next.addEntriesLeft(node); - deleteNode(node); - } else if (node.prev != null && node.prev.size < node.leftIndex) { - // Entries in prev node will fit in this node, move them and delete prev - node.addEntriesLeft(node.prev); - deleteNode(node.prev); - } else if (node.next != null - && node.next.size < (Node.NODE_SIZE - 1 - node.rightIndex)) { - // Entries in next node will fit in this node, move them and delete next - node.addEntriesRight(node.next); - deleteNode(node.next); - } - - return entry; - } - - /** - * Delete the node and ensure the tree is balanced. 
- * - * @param node node to delete - */ - private void deleteNode(final Node node) { - if (node.right == null) { - if (node.left != null) { - attachToParent(node, node.left); - } else { - attachNullToParent(node); - } - } else if (node.left == null) { - attachToParent(node, node.right); - } else { - // node.left != null && node.right != null - // node.next should replace node in tree - // node.next != null guaranteed since node.left != null - // node.next.left == null since node.next.prev is node - // node.next.right may be null or non-null - Node toMoveUp = node.next; - if (toMoveUp.right == null) { - attachNullToParent(toMoveUp); - } else { - attachToParent(toMoveUp, toMoveUp.right); - } - toMoveUp.left = node.left; - if (toMoveUp.left != null) { - toMoveUp.left.parent = toMoveUp; - } - toMoveUp.right = node.right; - if (toMoveUp.right != null) { - toMoveUp.right.parent = toMoveUp; - } - attachToParentNoBalance(node, toMoveUp); - toMoveUp.color = node.color; - } - - // Remove node from ordered list of nodes - if (node.prev != null) { - node.prev.next = node.next; - } - if (node.next != null) { - node.next.prev = node.prev; - } - - nodeCount--; - cacheAndClear(node); - } - - private void attachToParentNoBalance(Node toDelete, Node toConnect) { - Node parent = toDelete.parent; - toConnect.parent = parent; - if (parent == null) { - root = toConnect; - } else if (toDelete == parent.left) { - parent.left = toConnect; - } else { - parent.right = toConnect; - } - } - - private void attachToParent(Node toDelete, Node toConnect) { - attachToParentNoBalance(toDelete, toConnect); - if (toDelete.isBlack()) { - balanceDelete(toConnect); - } - } - - private void attachNullToParent(Node toDelete) { - Node parent = toDelete.parent; - if (parent == null) { - root = null; - } else { - if (toDelete == parent.left) { - parent.left = null; - } else { - parent.right = null; - } - if (toDelete.isBlack()) { - balanceDelete(parent); - } - } - } - - /** - * Balance tree after removing a node. 
- * - * @param node Node to balance after deleting another node - */ - private void balanceDelete(Node node) { - while (node != root && node.isBlack()) { - if (node == node.parent.left) { - Node sibling = node.parent.right; - if (sibling == null) { - node = node.parent; - continue; - } - if (sibling.isRed()) { - sibling.color = BLACK; - node.parent.color = RED; - rotateLeft(node.parent); - sibling = node.parent.right; - if (sibling == null) { - node = node.parent; - continue; - } - } - if ((sibling.left == null || !sibling.left.isRed()) - && (sibling.right == null || !sibling.right.isRed())) { - sibling.color = RED; - node = node.parent; - } else { - if (sibling.right == null || !sibling.right.isRed()) { - sibling.left.color = BLACK; - sibling.color = RED; - rotateRight(sibling); - sibling = node.parent.right; - } - sibling.color = node.parent.color; - node.parent.color = BLACK; - sibling.right.color = BLACK; - rotateLeft(node.parent); - node = root; - } - } else { - Node sibling = node.parent.left; - if (sibling == null) { - node = node.parent; - continue; - } - if (sibling.isRed()) { - sibling.color = BLACK; - node.parent.color = RED; - rotateRight(node.parent); - sibling = node.parent.left; - if (sibling == null) { - node = node.parent; - continue; - } - } - if ((sibling.left == null || sibling.left.isBlack()) - && (sibling.right == null || sibling.right.isBlack())) { - sibling.color = RED; - node = node.parent; - } else { - if (sibling.left == null || sibling.left.isBlack()) { - sibling.right.color = BLACK; - sibling.color = RED; - rotateLeft(sibling); - sibling = node.parent.left; - } - sibling.color = node.parent.color; - node.parent.color = BLACK; - sibling.left.color = BLACK; - rotateRight(node.parent); - node = root; - } - } - } - node.color = BLACK; - } - - @Override - public boolean containsAll(Collection c) { - for (Object entry : c) { - if (!contains(entry)) { - return false; - } - } - return true; - } - - @Override - public boolean addAll(Collection c) { - boolean modified = false; - for (E entry : c) { - if (add(entry)) { - modified = true; - } - } - return modified; - } - - @Override - public boolean retainAll(Collection c) { - boolean modified = false; - Iterator it = iterator(); - while (it.hasNext()) { - if (!c.contains(it.next())) { - it.remove(); - modified = true; - } - } - return modified; - } - - @Override - public boolean removeAll(Collection c) { - boolean modified = false; - for (Object entry : c) { - if (remove(entry)) { - modified = true; - } - } - return modified; - } - - @Override - public void clear() { - modCount++; - if (!isEmpty()) { - size = 0; - nodeCount = 0; - cacheAndClear(root); - root = null; - } - } - - /** - * Returns the current size divided by the capacity of the tree. A value - * between 0.0 and 1.0, where 1.0 means that every allocated node in the tree - * is completely full. - * - * An empty set will return 1.0 - * - * @return the fill ratio of the tree - */ - public double fillRatio() { - if (nodeCount > 1) { - // Count the last node as completely full since it can't be compacted - return (size + (Node.NODE_SIZE - root.getRightMostNode().size)) - / (double) (nodeCount * Node.NODE_SIZE); - } - return 1.0; - } - - /** - * Compact all the entries to use the fewest number of nodes in the tree. - * - * Having a compact tree minimize memory usage, but can cause inserts to get - * slower due to new nodes needs to be allocated as there is no space in any - * of the existing nodes anymore for entries added in the middle of the set. 
- * - * Useful to do to reduce memory consumption and if the tree is know to not - * change after compaction or mainly added to at either extreme. - * - * @param timeout Maximum time to spend compacting the tree set in - * milliseconds. - * - * @return true if compaction completed, false if aborted - */ - public boolean compact(long timeout) { - - if (!isEmpty()) { - long start = Time.monotonicNow(); - Node node = root.getLeftMostNode(); - while (node != null) { - if (node.prev != null && !node.prev.isFull()) { - Node prev = node.prev; - int count = Math.min(Node.NODE_SIZE - prev.size, node.size); - System.arraycopy(node.entries, node.leftIndex, - prev.entries, prev.rightIndex + 1, count); - node.leftIndex += count; - node.size -= count; - prev.rightIndex += count; - prev.size += count; - } - if (node.isEmpty()) { - Node temp = node.next; - deleteNode(node); - node = temp; - continue; - } else if (!node.isFull()) { - if (node.leftIndex != 0) { - System.arraycopy(node.entries, node.leftIndex, - node.entries, 0, node.size); - Arrays.fill(node.entries, node.size, node.rightIndex + 1, null); - node.leftIndex = 0; - node.rightIndex = node.size - 1; - } - } - node = node.next; - - if (Time.monotonicNow() - start > timeout) { - return false; - } - } - } - - return true; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 0e241301e0865..4a98f2d01e9e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -257,8 +257,9 @@ message BlockReportContextProto { // bypass rate-limiting. optional uint64 leaseId = 4 [ default = 0 ]; + // for compatibility, field number 5 should not be reused, see HDFS-13671. // True if the reported blocks are sorted by increasing block IDs - optional bool sorted = 5 [default = false]; + // optional bool sorted = 5 [default = false]; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 6e8e36e1cf0dc..e540a677e0e24 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -5195,32 +5195,6 @@ - - dfs.namenode.storageinfo.defragment.timeout.ms - 4 - - Timeout value in ms for the StorageInfo compaction run. - - - - - dfs.namenode.storageinfo.defragment.interval.ms - 600000 - - The thread for checking the StorageInfo for defragmentation will - run periodically. The time between runs is determined by this - property. - - - - - dfs.namenode.storageinfo.defragment.ratio - 0.75 - - The defragmentation threshold for the StorageInfo. 
- - - dfs.namenode.snapshot.capture.openfiles false diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java index df6a7dc814558..917f0dbe09395 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java @@ -173,7 +173,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception { final DataNode dn = cluster.getDataNodes().get(dnIdx); final String bpid = cluster.getNamesystem().getBlockPoolId(); List replicas = - dn.getFSDataset().getSortedFinalizedBlocks(bpid); + dn.getFSDataset().getFinalizedBlocks(bpid); assertTrue("Replicas do not exist", !replicas.isEmpty()); for (int idx = 0; idx < replicas.size(); idx++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 67df6d8a8dbe6..fa3c1aa68130f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -568,7 +568,7 @@ private void testErasureCodingWorkerXmitsWeight( writeFile(fs, "/ec-xmits-weight", fileLen); DataNode dn = cluster.getDataNodes().get(0); - int corruptBlocks = dn.getFSDataset().getSortedFinalizedBlocks( + int corruptBlocks = dn.getFSDataset().getFinalizedBlocks( cluster.getNameNode().getNamesystem().getBlockPoolId()).size(); int expectedXmits = corruptBlocks * expectedWeight; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java index 0aff861a10888..17b3939c9af11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java @@ -240,7 +240,7 @@ public BlockReportResponseProto answer(InvocationOnMock invocation) { request.set(null); nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask()); nn.blockReport(reg, "pool", sbr, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); BlockReportRequestProto proto = request.get(); assertNotNull(proto); assertTrue(proto.getReports(0).getBlocksList().isEmpty()); @@ -253,7 +253,7 @@ public BlockReportResponseProto answer(InvocationOnMock invocation) { StorageBlockReport[] obp = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage("s1"), blockList) }; nn.blockReport(reg, "pool", obp, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); proto = request.get(); assertNotNull(proto); assertFalse(proto.getReports(0).getBlocksList().isEmpty()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java index 3c5c5d9fb2fee..70f13eb8af2e5 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java @@ -19,6 +19,12 @@ import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID; import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Random; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult; +import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.junit.Assert; import org.junit.Test; @@ -133,4 +140,85 @@ public void testAddStorageWithDifferentBlock() throws Exception { "storageID", "127.0.0.1"); blockInfo1.addStorage(storage, blockInfo2); } + + @Test + public void testBlockListMoveToHead() throws Exception { + LOG.info("BlockInfo moveToHead tests..."); + + final int maxBlocks = 10; + + DatanodeStorageInfo dd = + DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1"); + ArrayList blockList = new ArrayList(maxBlocks); + ArrayList blockInfoList = new ArrayList(); + int headIndex; + int curIndex; + + LOG.info("Building block list..."); + for (int i = 0; i < maxBlocks; i++) { + blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP)); + blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3)); + dd.addBlock(blockInfoList.get(i)); + + // index of the datanode should be 0 + assertEquals("Find datanode should be 0", 0, blockInfoList.get(i) + .findStorageInfo(dd)); + } + + // list length should be equal to the number of blocks we inserted + LOG.info("Checking list length..."); + assertEquals("Length should be MAX_BLOCK", maxBlocks, dd.numBlocks()); + Iterator it = dd.getBlockIterator(); + int len = 0; + while (it.hasNext()) { + it.next(); + len++; + } + assertEquals("There should be MAX_BLOCK blockInfo's", maxBlocks, len); + + headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd); + + LOG.info("Moving each block to the head of the list..."); + for (int i = 0; i < maxBlocks; i++) { + curIndex = blockInfoList.get(i).findStorageInfo(dd); + headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex); + // the moved element must be at the head of the list + assertEquals("Block should be at the head of the list now.", + blockInfoList.get(i), dd.getBlockListHeadForTesting()); + } + + // move head of the list to the head - this should not change the list + LOG.info("Moving head to the head..."); + + BlockInfo temp = dd.getBlockListHeadForTesting(); + curIndex = 0; + headIndex = 0; + dd.moveBlockToHead(temp, curIndex, headIndex); + assertEquals( + "Moving head to the head of the list should not change the list", + temp, dd.getBlockListHeadForTesting()); + + // check all elements of the list against the original blockInfoList + LOG.info("Checking elements of the list..."); + temp = dd.getBlockListHeadForTesting(); + assertNotNull("Head should not be null", temp); + int c = maxBlocks - 1; + while (temp != null) { + assertEquals("Expected element is not on the list", + blockInfoList.get(c--), temp); + temp = temp.getNext(0); + } + + LOG.info("Moving random blocks 
to the head of the list..."); + headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd); + Random rand = new Random(); + for (int i = 0; i < maxBlocks; i++) { + int j = rand.nextInt(maxBlocks); + curIndex = blockInfoList.get(j).findStorageInfo(dd); + headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex); + // the moved element must be at the head of the list + assertEquals("Block should be at the head of the list now.", + blockInfoList.get(j), dd.getBlockListHeadForTesting()); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index a8fd71ba49dc8..d5e0a99fe789b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hdfs.server.namenode.TestINodeFile; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; import org.apache.hadoop.hdfs.server.namenode.ha.HAState; -import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; @@ -97,7 +96,6 @@ import java.io.PrintWriter; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.EnumSet; import java.util.Iterator; import java.util.LinkedList; @@ -1037,8 +1035,7 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception { // Make sure it's the first full report assertEquals(0, ds.getBlockReportCount()); bm.processReport(node, new DatanodeStorage(ds.getStorageID()), - builder.build(), - new BlockReportContext(1, 0, System.nanoTime(), 0, true)); + builder.build(), null); assertEquals(1, ds.getBlockReportCount()); // verify the storage info is correct @@ -1104,67 +1101,6 @@ public void testSafeModeWithProvidedStorageBR() throws Exception { assertEquals(1, ds1.getBlockReportCount()); } - @Test - public void testFullBR() throws Exception { - doReturn(true).when(fsn).isRunning(); - - DatanodeDescriptor node = nodes.get(0); - DatanodeStorageInfo ds = node.getStorageInfos()[0]; - node.setAlive(true); - DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, ""); - - // register new node - bm.getDatanodeManager().registerDatanode(nodeReg); - bm.getDatanodeManager().addDatanode(node); - assertEquals(node, bm.getDatanodeManager().getDatanode(node)); - assertEquals(0, ds.getBlockReportCount()); - - ArrayList blocks = new ArrayList<>(); - for (int id = 24; id > 0; id--) { - blocks.add(addBlockToBM(id)); - } - - // Make sure it's the first full report - assertEquals(0, ds.getBlockReportCount()); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), - generateReport(blocks), - new BlockReportContext(1, 0, System.nanoTime(), 0, false)); - assertEquals(1, ds.getBlockReportCount()); - // verify the storage info is correct - for (BlockInfo block : blocks) { - assertTrue(bm.getStoredBlock(block).findStorageInfo(ds) >= 0); - } - - // Send unsorted report - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), - generateReport(blocks), - new BlockReportContext(1, 0, System.nanoTime(), 0, false)); - assertEquals(2, 
ds.getBlockReportCount()); - // verify the storage info is correct - for (BlockInfo block : blocks) { - assertTrue(bm.getStoredBlock(block).findStorageInfo(ds) >= 0); - } - - // Sort list and send a sorted report - Collections.sort(blocks); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), - generateReport(blocks), - new BlockReportContext(1, 0, System.nanoTime(), 0, true)); - assertEquals(3, ds.getBlockReportCount()); - // verify the storage info is correct - for (BlockInfo block : blocks) { - assertTrue(bm.getStoredBlock(block).findStorageInfo(ds) >= 0); - } - } - - private BlockListAsLongs generateReport(List blocks) { - BlockListAsLongs.Builder builder = BlockListAsLongs.builder(); - for (BlockInfo block : blocks) { - builder.add(new FinalizedReplica(block, null, null)); - } - return builder.build(); - } - @Test public void testUCBlockNotConsideredMissing() throws Exception { DatanodeDescriptor node = nodes.get(0); @@ -1695,8 +1631,8 @@ private void verifyPlacementPolicy(final MiniDFSCluster cluster, LocatedBlock lb = DFSTestUtil.getAllBlocks(dfs, file).get(0); BlockInfo blockInfo = blockManager.getStoredBlock(lb.getBlock().getLocalBlock()); - Iterator itr = blockInfo.getStorageInfos(); LOG.info("Block " + blockInfo + " storages: "); + Iterator itr = blockInfo.getStorageInfos(); while (itr.hasNext()) { DatanodeStorageInfo dn = itr.next(); LOG.info(" Rack: " + dn.getDatanodeDescriptor().getNetworkLocation() @@ -1971,4 +1907,4 @@ public void testValidateReconstructionWorkAndRacksNotEnough() { // validateReconstructionWork return false, need to perform resetTargets(). assertNull(work.getTargets()); } -} +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java index 40408b1924413..a5acc14edd935 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java @@ -97,14 +97,13 @@ public void testCheckBlockReportLease() throws Exception { DelayAnswer delayer = new DelayAnswer(BlockManager.LOG); doAnswer(delayer).when(spyBlockManager).processReport( any(DatanodeStorageInfo.class), - any(BlockListAsLongs.class), - any(BlockReportContext.class)); + any(BlockListAsLongs.class)); ExecutorService pool = Executors.newFixedThreadPool(1); // Trigger sendBlockReport BlockReportContext brContext = new BlockReportContext(1, 0, - rand.nextLong(), hbResponse.getFullBlockReportLeaseId(), true); + rand.nextLong(), hbResponse.getFullBlockReportLeaseId()); Future sendBRfuturea = pool.submit(() -> { // Build every storage with 100 blocks for sending report DatanodeStorage[] datanodeStorages diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java index 43f3243b5f698..19b845007bf47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java @@ -46,6 +46,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashSet; +import java.util.Iterator; import java.util.Set; public class TestReconstructStripedBlocksWithRackAwareness { @@ -172,7 +173,9 @@ public void testReconstructForNotEnoughRacks() throws Exception { // we now should have 9 internal blocks distributed in 5 racks Set rackSet = new HashSet<>(); - for (DatanodeStorageInfo storage : blockInfo.storages) { + Iterator it = blockInfo.getStorageInfos(); + while (it.hasNext()){ + DatanodeStorageInfo storage = it.next(); rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation()); } Assert.assertEquals("rackSet size is wrong: " + rackSet, dataBlocks - 1, @@ -203,7 +206,9 @@ public void testReconstructForNotEnoughRacks() throws Exception { // check if redundancy monitor correctly schedule the reconstruction work. boolean scheduled = false; for (int i = 0; i < 5; i++) { // retry 5 times - for (DatanodeStorageInfo storage : blockInfo.storages) { + it = blockInfo.getStorageInfos(); + while (it.hasNext()){ + DatanodeStorageInfo storage = it.next(); if (storage != null) { DatanodeDescriptor dn = storage.getDatanodeDescriptor(); Assert.assertEquals("Block to be erasure coded is wrong for datanode:" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index c4398ab1fda47..b56fc8143235d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -1516,7 +1516,7 @@ public StorageReport[] getStorageReports(String bpid) { } @Override - public List getSortedFinalizedBlocks(String bpid) { + public List getFinalizedBlocks(String bpid) { throw new UnsupportedOperationException(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java index fb65d0e2227e5..958b3e46cf46e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java @@ -19,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -118,13 +116,12 @@ public void testBlockHasMultipleReplicasOnSameDN() throws IOException { StorageBlockReport reports[] = new StorageBlockReport[cluster.getStoragesPerDatanode()]; - ArrayList blocks = new ArrayList<>(); + ArrayList blocks = new ArrayList<>(); for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) { Block localBlock = locatedBlock.getBlock().getLocalBlock(); blocks.add(new FinalizedReplica(localBlock, null, null)); } - Collections.sort(blocks); try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) { @@ -137,7 +134,7 @@ public void testBlockHasMultipleReplicasOnSameDN() throws 
IOException { // Should not assert! cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); // Get the block locations once again. locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java index 21e264af198e6..1ea52a0cffe40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java @@ -58,7 +58,6 @@ public class TestLargeBlockReport { private DatanodeStorage dnStorage; private final long reportId = 1; private final long fullBrLeaseId = 0; - private final boolean sorted = true; @BeforeClass public static void init() { @@ -84,7 +83,7 @@ public void testBlockReportExceedsLengthLimit() throws Exception { StorageBlockReport[] reports = createReports(6000000); try { nnProxy.blockReport(bpRegistration, bpId, reports, - new BlockReportContext(1, 0, reportId, fullBrLeaseId, sorted)); + new BlockReportContext(1, 0, reportId, fullBrLeaseId)); fail("Should have failed because of the too long RPC data length"); } catch (Exception e) { // Expected. We can't reliably assert anything about the exception type @@ -99,7 +98,7 @@ public void testBlockReportSucceedsWithLargerLengthLimit() throws Exception { initCluster(); StorageBlockReport[] reports = createReports(6000000); nnProxy.blockReport(bpRegistration, bpId, reports, - new BlockReportContext(1, 0, reportId, fullBrLeaseId, sorted)); + new BlockReportContext(1, 0, reportId, fullBrLeaseId)); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java index 791ee20190f88..b1742cfa8d92a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java @@ -39,7 +39,7 @@ protected void sendBlockReports(DatanodeRegistration dnR, String poolId, LOG.info("Sending block report for storage " + report.getStorage().getStorageID()); StorageBlockReport[] singletonReport = { report }; cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport, - new BlockReportContext(reports.length, i, System.nanoTime(), 0L, true)); + new BlockReportContext(reports.length, i, System.nanoTime(), 0L)); i++; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java index a35fa48932503..fd19ba6f0b72f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java @@ -36,6 +36,6 @@ protected void 
sendBlockReports(DatanodeRegistration dnR, String poolId, StorageBlockReport[] reports) throws IOException { LOG.info("Sending combined block reports for " + dnR); cluster.getNameNodeRpc().blockReport(dnR, poolId, reports, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java index 2c27cc1f2b794..3fbd4de721260 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java @@ -91,7 +91,7 @@ public Map getVolumeInfoMap() { } @Override - public List getSortedFinalizedBlocks(String bpid) { + public List getFinalizedBlocks(String bpid) { return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index dbb95487853c6..9b659d9150654 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -88,7 +88,6 @@ import java.io.Writer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.HashSet; import java.util.List; @@ -774,41 +773,6 @@ public void testAddVolumeFailureReleasesInUseLock() throws IOException { FsDatasetTestUtil.assertFileLockReleased(badDir.toString()); } - - @Test - /** - * This test is here primarily to catch any case where the datanode replica - * map structure is changed to a new structure which is not sorted and hence - * reading the blocks from it directly would not be sorted. 
- */ - public void testSortedFinalizedBlocksAreSorted() throws IOException { - this.conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); - try { - cluster.waitActive(); - DataNode dn = cluster.getDataNodes().get(0); - - FsDatasetSpi ds = DataNodeTestUtils.getFSDataset(dn); - ds.addBlockPool(BLOCKPOOL, conf); - - // Load 1000 blocks with random blockIDs - for (int i=0; i<=1000; i++) { - ExtendedBlock eb = new ExtendedBlock( - BLOCKPOOL, new Random().nextInt(), 1000, 1000 + i); - cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb); - } - // Get the sorted blocks and validate the arrayList is sorted - List replicaList = ds.getSortedFinalizedBlocks(BLOCKPOOL); - for (int i=0; i 0) { - // Not sorted so fail the test - fail("ArrayList is not sorted, and it should be"); - } - } - } finally { - cluster.shutdown(); - } - } @Test public void testDeletingBlocks() throws IOException { @@ -1766,7 +1730,7 @@ public void testNotifyNamenodeMissingOrNewBlock() throws Exception { DataNode dn = cluster.getDataNodes().get(0); FsDatasetSpi fsdataset = dn.getFSDataset(); List replicaInfos = - fsdataset.getSortedFinalizedBlocks(bpid); + fsdataset.getFinalizedBlocks(bpid); assertEquals(1, replicaInfos.size()); ReplicaInfo replicaInfo = replicaInfos.get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 542f14964f81d..e861a34fc033d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -958,7 +958,7 @@ void register() throws IOException { new StorageBlockReport(storage, BlockListAsLongs.EMPTY) }; dataNodeProto.blockReport(dnRegistration, bpid, reports, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); } /** @@ -1240,7 +1240,7 @@ long executeOp(int daemonId, int inputIdx, String ignore) throws IOException { StorageBlockReport[] report = { new StorageBlockReport( dn.storage, dn.getBlockReportList()) }; dataNodeProto.blockReport(dn.dnRegistration, bpid, report, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); long end = Time.now(); return end-start; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index ef91fc3a0e9f1..aec681176bb8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; -import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; @@ -351,8 
+350,7 @@ public void testAddUCReplica() throws Exception { StorageBlockReport[] reports = {new StorageBlockReport(storage, bll)}; cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), - bpId, reports, - new BlockReportContext(1, 0, System.nanoTime(), 0, true)); + bpId, reports, null); } DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index 16f4de96877cf..01ea148a5600e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -127,7 +127,7 @@ public void testDeadDatanode() throws Exception { BlockListAsLongs.EMPTY) }; try { dnp.blockReport(reg, poolId, report, - new BlockReportContext(1, 0, System.nanoTime(), 0L, true)); + new BlockReportContext(1, 0, System.nanoTime(), 0L)); fail("Expected IOException is not thrown"); } catch (IOException ex) { // Expected diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/FoldedTreeSetTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/FoldedTreeSetTest.java deleted file mode 100644 index d554b1b1b71de..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/FoldedTreeSetTest.java +++ /dev/null @@ -1,644 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.util; - -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import static org.junit.Assert.*; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Comparator; -import java.util.Iterator; -import java.util.Random; - -/** - * Test of TreeSet - */ -public class FoldedTreeSetTest { - - private static Random srand; - - public FoldedTreeSetTest() { - } - - @BeforeClass - public static void setUpClass() { - long seed = System.nanoTime(); - System.out.println("This run uses the random seed " + seed); - srand = new Random(seed); - } - - @AfterClass - public static void tearDownClass() { - } - - @Before - public void setUp() { - } - - @After - public void tearDown() { - } - - /** - * Test of comparator method, of class TreeSet. 
- */ - @Test - public void testComparator() { - Comparator comparator = new Comparator() { - - @Override - public int compare(String o1, String o2) { - return o1.compareTo(o2); - } - }; - assertEquals(null, new FoldedTreeSet<>().comparator()); - assertEquals(comparator, new FoldedTreeSet<>(comparator).comparator()); - - FoldedTreeSet set = new FoldedTreeSet<>(comparator); - set.add("apa3"); - set.add("apa2"); - set.add("apa"); - set.add("apa5"); - set.add("apa4"); - assertEquals(5, set.size()); - assertEquals("apa", set.get("apa")); - } - - /** - * Test of first method, of class TreeSet. - */ - @Test - public void testFirst() { - FoldedTreeSet tree = new FoldedTreeSet<>(); - for (int i = 0; i < 256; i++) { - tree.add(1024 + i); - assertEquals(1024, tree.first().intValue()); - } - for (int i = 1; i < 256; i++) { - tree.remove(1024 + i); - assertEquals(1024, tree.first().intValue()); - } - } - - /** - * Test of last method, of class TreeSet. - */ - @Test - public void testLast() { - FoldedTreeSet tree = new FoldedTreeSet<>(); - for (int i = 0; i < 256; i++) { - tree.add(1024 + i); - assertEquals(1024 + i, tree.last().intValue()); - } - for (int i = 0; i < 255; i++) { - tree.remove(1024 + i); - assertEquals(1279, tree.last().intValue()); - } - } - - /** - * Test of size method, of class TreeSet. - */ - @Test - public void testSize() { - FoldedTreeSet instance = new FoldedTreeSet<>(); - String entry = "apa"; - assertEquals(0, instance.size()); - instance.add(entry); - assertEquals(1, instance.size()); - instance.remove(entry); - assertEquals(0, instance.size()); - } - - /** - * Test of isEmpty method, of class TreeSet. - */ - @Test - public void testIsEmpty() { - FoldedTreeSet instance = new FoldedTreeSet<>(); - boolean expResult = true; - boolean result = instance.isEmpty(); - assertEquals(expResult, result); - instance.add("apa"); - instance.remove("apa"); - assertEquals(expResult, result); - } - - /** - * Test of contains method, of class TreeSet. - */ - @Test - public void testContains() { - FoldedTreeSet instance = new FoldedTreeSet<>(); - String entry = "apa"; - assertEquals(false, instance.contains(entry)); - instance.add(entry); - assertEquals(true, instance.contains(entry)); - assertEquals(false, instance.contains(entry + entry)); - } - - /** - * Test of iterator method, of class TreeSet. - */ - @Test - public void testIterator() { - - for (int iter = 0; iter < 10; iter++) { - FoldedTreeSet set = new FoldedTreeSet<>(); - long[] longs = new long[64723]; - for (int i = 0; i < longs.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs[i] = val.getId(); - set.add(val); - } - assertEquals(longs.length, set.size()); - Arrays.sort(longs); - - Iterator it = set.iterator(); - for (int i = 0; i < longs.length; i++) { - assertTrue(it.hasNext()); - Holder val = it.next(); - assertEquals(longs[i], val.getId()); - // remove randomly to force non linear removes - if (srand.nextBoolean()) { - it.remove(); - } - } - } - } - - /** - * Test of toArray method, of class TreeSet. - */ - @Test - public void testToArray() { - FoldedTreeSet tree = new FoldedTreeSet<>(); - ArrayList list = new ArrayList<>(256); - for (int i = 0; i < 256; i++) { - list.add(1024 + i); - } - tree.addAll(list); - assertArrayEquals(list.toArray(), tree.toArray()); - } - - /** - * Test of toArray method, of class TreeSet. 
- */ - @Test - public void testToArray_GenericType() { - FoldedTreeSet tree = new FoldedTreeSet<>(); - ArrayList list = new ArrayList<>(256); - for (int i = 0; i < 256; i++) { - list.add(1024 + i); - } - tree.addAll(list); - assertArrayEquals(list.toArray(new Integer[tree.size()]), tree.toArray(new Integer[tree.size()])); - assertArrayEquals(list.toArray(new Integer[tree.size() + 100]), tree.toArray(new Integer[tree.size() + 100])); - } - - /** - * Test of add method, of class TreeSet. - */ - @Test - public void testAdd() { - FoldedTreeSet simpleSet = new FoldedTreeSet<>(); - String entry = "apa"; - assertTrue(simpleSet.add(entry)); - assertFalse(simpleSet.add(entry)); - - FoldedTreeSet intSet = new FoldedTreeSet<>(); - for (int i = 512; i < 1024; i++) { - assertTrue(intSet.add(i)); - } - for (int i = -1024; i < -512; i++) { - assertTrue(intSet.add(i)); - } - for (int i = 0; i < 512; i++) { - assertTrue(intSet.add(i)); - } - for (int i = -512; i < 0; i++) { - assertTrue(intSet.add(i)); - } - assertEquals(2048, intSet.size()); - - FoldedTreeSet set = new FoldedTreeSet<>(); - long[] longs = new long[23432]; - for (int i = 0; i < longs.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs[i] = val.getId(); - assertTrue(set.add(val)); - } - assertEquals(longs.length, set.size()); - Arrays.sort(longs); - - Iterator it = set.iterator(); - for (int i = 0; i < longs.length; i++) { - assertTrue(it.hasNext()); - Holder val = it.next(); - assertEquals(longs[i], val.getId()); - } - - // Specially constructed adds to exercise all code paths - FoldedTreeSet specialAdds = new FoldedTreeSet<>(); - // Fill node with even numbers - for (int i = 0; i < 128; i += 2) { - assertTrue(specialAdds.add(i)); - } - // Remove left and add left - assertTrue(specialAdds.remove(0)); - assertTrue(specialAdds.add(-1)); - assertTrue(specialAdds.remove(-1)); - // Add right and shift everything left - assertTrue(specialAdds.add(127)); - assertTrue(specialAdds.remove(127)); - - // Empty at both ends - assertTrue(specialAdds.add(0)); - assertTrue(specialAdds.remove(0)); - assertTrue(specialAdds.remove(126)); - // Add in the middle left to slide entries left - assertTrue(specialAdds.add(11)); - assertTrue(specialAdds.remove(11)); - // Add in the middle right to slide entries right - assertTrue(specialAdds.add(99)); - assertTrue(specialAdds.remove(99)); - // Add existing entry in the middle of a node - assertFalse(specialAdds.add(64)); - } - - @Test - public void testAddOrReplace() { - FoldedTreeSet simpleSet = new FoldedTreeSet<>(); - String entry = "apa"; - assertNull(simpleSet.addOrReplace(entry)); - assertEquals(entry, simpleSet.addOrReplace(entry)); - - FoldedTreeSet intSet = new FoldedTreeSet<>(); - for (int i = 0; i < 1024; i++) { - assertNull(intSet.addOrReplace(i)); - } - for (int i = 0; i < 1024; i++) { - assertEquals(i, intSet.addOrReplace(i).intValue()); - } - } - - private static class Holder implements Comparable { - - private final long id; - - public Holder(long id) { - this.id = id; - } - - public long getId() { - return id; - } - - @Override - public int compareTo(Holder o) { - return id < o.getId() ? -1 - : id > o.getId() ? 
1 : 0; - } - } - - @Test - public void testRemoveWithComparator() { - FoldedTreeSet set = new FoldedTreeSet<>(); - long[] longs = new long[98327]; - for (int i = 0; i < longs.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs[i] = val.getId(); - set.add(val); - } - assertEquals(longs.length, set.size()); - Comparator cmp = new Comparator() { - @Override - public int compare(Object o1, Object o2) { - long lookup = (long) o1; - long stored = ((Holder) o2).getId(); - return lookup < stored ? -1 - : lookup > stored ? 1 : 0; - } - }; - - for (long val : longs) { - set.remove(val, cmp); - } - assertEquals(0, set.size()); - assertTrue(set.isEmpty()); - } - - @Test - public void testGetWithComparator() { - FoldedTreeSet set = new FoldedTreeSet<>(); - long[] longs = new long[32147]; - for (int i = 0; i < longs.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs[i] = val.getId(); - set.add(val); - } - assertEquals(longs.length, set.size()); - Comparator cmp = new Comparator() { - @Override - public int compare(Object o1, Object o2) { - long lookup = (long) o1; - long stored = ((Holder) o2).getId(); - return lookup < stored ? -1 - : lookup > stored ? 1 : 0; - } - }; - - for (long val : longs) { - assertEquals(val, set.get(val, cmp).getId()); - } - } - - @Test - public void testGet() { - FoldedTreeSet set = new FoldedTreeSet<>(); - long[] longs = new long[43277]; - for (int i = 0; i < longs.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs[i] = val.getId(); - set.add(val); - } - assertEquals(longs.length, set.size()); - - for (long val : longs) { - assertEquals(val, set.get(new Holder(val)).getId()); - } - } - - /** - * Test of remove method, of class TreeSet. 
- */ - @Test - public void testRemove() { - FoldedTreeSet instance = new FoldedTreeSet<>(); - assertEquals(false, instance.remove("apa")); - instance.add("apa"); - assertEquals(true, instance.remove("apa")); - - removeLeft(); - removeRight(); - removeAt(); - removeRandom(); - } - - public void removeLeft() { - FoldedTreeSet set = new FoldedTreeSet<>(); - for (int i = 1; i <= 320; i++) { - set.add(i); - } - for (int i = 193; i < 225; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 129; i < 161; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 256; i > 224; i--) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 257; i < 289; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - while (!set.isEmpty()) { - assertTrue(set.remove(set.first())); - } - } - - public void removeRight() { - FoldedTreeSet set = new FoldedTreeSet<>(); - for (int i = 1; i <= 320; i++) { - set.add(i); - } - for (int i = 193; i < 225; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 192; i > 160; i--) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 256; i > 224; i--) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 320; i > 288; i--) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - while (!set.isEmpty()) { - assertTrue(set.remove(set.last())); - } - } - - public void removeAt() { - FoldedTreeSet set = new FoldedTreeSet<>(); - for (int i = 1; i <= 320; i++) { - set.add(i); - } - for (int i = 193; i < 225; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 160; i < 192; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 225; i < 257; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - for (int i = 288; i < 320; i++) { - assertEquals(true, set.remove(i)); - assertEquals(false, set.remove(i)); - } - } - - public void removeRandom() { - FoldedTreeSet set = new FoldedTreeSet<>(); - int[] integers = new int[2048]; - for (int i = 0; i < 2048; i++) { - int val = srand.nextInt(); - while (set.contains(val)) { - val = srand.nextInt(); - } - integers[i] = val; - set.add(val); - } - assertEquals(2048, set.size()); - - for (int val : integers) { - assertEquals(true, set.remove(val)); - assertEquals(false, set.remove(val)); - } - assertEquals(true, set.isEmpty()); - } - - /** - * Test of containsAll method, of class TreeSet. - */ - @Test - public void testContainsAll() { - Collection list = Arrays.asList(new String[]{"apa", "apa2", "apa"}); - FoldedTreeSet instance = new FoldedTreeSet<>(); - assertEquals(false, instance.containsAll(list)); - instance.addAll(list); - assertEquals(true, instance.containsAll(list)); - } - - /** - * Test of addAll method, of class TreeSet. - */ - @Test - public void testAddAll() { - Collection list = Arrays.asList(new String[]{"apa", "apa2", "apa"}); - FoldedTreeSet instance = new FoldedTreeSet<>(); - assertEquals(true, instance.addAll(list)); - assertEquals(false, instance.addAll(list)); // add same entries again - } - - /** - * Test of retainAll method, of class TreeSet. 
- */ - @Test - public void testRetainAll() { - Collection list = Arrays.asList(new String[]{"apa", "apa2", "apa"}); - FoldedTreeSet instance = new FoldedTreeSet<>(); - instance.addAll(list); - assertEquals(false, instance.retainAll(list)); - assertEquals(2, instance.size()); - Collection list2 = Arrays.asList(new String[]{"apa"}); - assertEquals(true, instance.retainAll(list2)); - assertEquals(1, instance.size()); - } - - /** - * Test of removeAll method, of class TreeSet. - */ - @Test - public void testRemoveAll() { - Collection list = Arrays.asList(new String[]{"apa", "apa2", "apa"}); - FoldedTreeSet instance = new FoldedTreeSet<>(); - assertEquals(false, instance.removeAll(list)); - instance.addAll(list); - assertEquals(true, instance.removeAll(list)); - assertEquals(true, instance.isEmpty()); - } - - /** - * Test of clear method, of class TreeSet. - */ - @Test - public void testClear() { - FoldedTreeSet instance = new FoldedTreeSet<>(); - instance.clear(); - assertEquals(true, instance.isEmpty()); - instance.add("apa"); - assertEquals(false, instance.isEmpty()); - instance.clear(); - assertEquals(true, instance.isEmpty()); - } - - @Test - public void testFillRatio() { - FoldedTreeSet set = new FoldedTreeSet<>(); - final int size = 1024; - for (int i = 1; i <= size; i++) { - set.add(i); - assertEquals("Iteration: " + i, 1.0, set.fillRatio(), 0.0); - } - - for (int i = 1; i <= size / 2; i++) { - set.remove(i * 2); - // Need the max since all the removes from the last node doesn't - // affect the fill ratio - assertEquals("Iteration: " + i, - Math.max((size - i) / (double) size, 0.53125), - set.fillRatio(), 0.0); - } - } - - @Test - public void testCompact() { - FoldedTreeSet set = new FoldedTreeSet<>(); - long[] longs = new long[24553]; - for (int i = 0; i < longs.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs[i] = val.getId(); - set.add(val); - } - assertEquals(longs.length, set.size()); - - long[] longs2 = new long[longs.length]; - for (int i = 0; i < longs2.length; i++) { - Holder val = new Holder(srand.nextLong()); - while (set.contains(val)) { - val = new Holder(srand.nextLong()); - } - longs2[i] = val.getId(); - set.add(val); - } - assertEquals(longs.length + longs2.length, set.size()); - - // Create fragementation - for (long val : longs) { - assertTrue(set.remove(new Holder(val))); - } - assertEquals(longs2.length, set.size()); - - assertFalse(set.compact(0)); - assertTrue(set.compact(Long.MAX_VALUE)); - assertEquals(longs2.length, set.size()); - for (long val : longs) { - assertFalse(set.remove(new Holder(val))); - } - for (long val : longs2) { - assertEquals(val, set.get(new Holder(val)).getId()); - } - } -} From 6c6afec92d67d1f3b865562c459e77064a85dc59 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 16 Jun 2021 20:37:56 -0700 Subject: [PATCH 424/666] HDFS-16074. 
Remove an expensive debug string concatenation (#3107) Reviewed-by: Ayush Saxena Reviewed-by: Akira Ajisaka Reviewed-by: Inigo Goiri Reviewed-by: Hui Fei Reviewed-by: Viraj Jasani Reviewed-by: litao --- .../main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index dba9e939d8ed3..fea5551f91be6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -483,9 +483,10 @@ private synchronized void writeChunkPrepare(int buflen, currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer() .getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false); DFSClient.LOG.debug("WriteChunk allocating new packet seqno={}," - + " src={}, packetSize={}, chunksPerPacket={}, bytesCurBlock={}", + + " src={}, packetSize={}, chunksPerPacket={}, bytesCurBlock={}," + + " output stream={}", currentPacket.getSeqno(), src, packetSize, chunksPerPacket, - getStreamer().getBytesCurBlock() + ", " + this); + getStreamer().getBytesCurBlock(), this); } } From 42f80ed7ef8008089a5b28bf5d52328ab0017839 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Thu, 17 Jun 2021 22:44:44 +0530 Subject: [PATCH 425/666] HADOOP-17724. Add Dockerfile for Debian 10 (#3038) --- dev-support/docker/Dockerfile_debian_10 | 107 ++++++++++++++++++ dev-support/docker/pkg-resolver/packages.json | 64 +++++++++++ .../docker/pkg-resolver/platforms.json | 3 +- 3 files changed, 173 insertions(+), 1 deletion(-) create mode 100644 dev-support/docker/Dockerfile_debian_10 diff --git a/dev-support/docker/Dockerfile_debian_10 b/dev-support/docker/Dockerfile_debian_10 new file mode 100644 index 0000000000000..8b7cbde7e624d --- /dev/null +++ b/dev-support/docker/Dockerfile_debian_10 @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for installing the necessary dependencies for building Hadoop. +# See BUILDING.txt. 
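An aside on the HDFS-16074 change above (the DFSOutputStream debug log): passing a pre-concatenated string to LOG.debug() pays the toString() and concatenation cost even when debug logging is disabled, while SLF4J's {} placeholders defer rendering until the level check passes. A minimal, self-contained sketch of the difference; the class and variable names here are illustrative, not from the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only: why parameterized logging is cheaper than concatenation.
public class LazyDebugLogSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LazyDebugLogSketch.class);

  // Stand-in for an object with an expensive toString(), like a DFSOutputStream.
  static final class Expensive {
    @Override
    public String toString() {
      return "expensive-to-render state";
    }
  }

  public static void main(String[] args) {
    Expensive state = new Expensive();
    // Eager: the argument string is built before debug() is even invoked.
    LOG.debug("state=" + state);
    // Lazy: toString() only runs if the debug level is enabled.
    LOG.debug("state={}", state);
  }
}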
+ +FROM debian:10 + +WORKDIR /root + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +##### +# Disable suggests/recommends +##### +RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras +RUN echo APT::Install-Suggests "0"\; >> /etc/apt/apt.conf.d/10disableextras + +##### +# For installing the latest packages +##### +RUN echo 'deb http://deb.debian.org/debian testing main' >> /etc/apt/sources.list + +ENV DEBIAN_FRONTEND noninteractive +ENV DEBCONF_TERSE true + +###### +# Platform package dependency resolver +###### +COPY pkg-resolver pkg-resolver +RUN chmod a+x pkg-resolver/*.sh pkg-resolver/*.py \ + && chmod a+r pkg-resolver/*.json + +###### +# Install packages from apt +###### +# hadolint ignore=DL3008,SC2046 +RUN apt-get -q update \ + && apt-get -q install -y --no-install-recommends python3 \ + && apt-get -q install -y --no-install-recommends $(pkg-resolver/resolve.py debian:10) \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN locale-gen en_US.UTF-8 +ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' +ENV PYTHONIOENCODING=utf-8 + +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-amd64 + +####### +# Set env vars for SpotBugs 4.2.2 +####### +ENV SPOTBUGS_HOME /opt/spotbugs + +####### +# Set env vars for Google Protobuf 3.7.1 +####### +ENV PROTOBUF_HOME /opt/protobuf +ENV PATH "${PATH}:/opt/protobuf/bin" + +### +# Avoid out of memory errors in builds +### +ENV MAVEN_OPTS -Xms256m -Xmx3072m + +# Skip gpg verification when downloading Yetus via yetus-wrapper +ENV HADOOP_SKIP_YETUS_VERIFICATION true + +#### +# Install packages +#### +RUN pkg-resolver/install-common-pkgs.sh +RUN pkg-resolver/install-spotbugs.sh debian:10 +RUN pkg-resolver/install-boost.sh debian:10 +RUN pkg-resolver/install-protobuf.sh debian:10 +RUN pkg-resolver/install-hadolint.sh debian:10 +RUN pkg-resolver/install-intel-isa-l.sh debian:10 + +### +# Everything past this point is either not needed for testing or breaks Yetus. +# So tell Yetus not to read the rest of the file: +# YETUS CUT HERE +### + +# Add a welcome message and environment checks. 
+COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh +RUN chmod 755 /root/hadoop_env_checks.sh +# hadolint ignore=SC2016 +RUN echo '${HOME}/hadoop_env_checks.sh' >> /root/.bashrc diff --git a/dev-support/docker/pkg-resolver/packages.json b/dev-support/docker/pkg-resolver/packages.json index 4ebbc900fe4de..cb4b7914d54d1 100644 --- a/dev-support/docker/pkg-resolver/packages.json +++ b/dev-support/docker/pkg-resolver/packages.json @@ -1,30 +1,39 @@ { "ant": { + "debian:10": "ant", "ubuntu:focal": "ant", "ubuntu:focal::arch64": "ant", "centos:7": "ant", "centos:8": "ant" }, "apt-utils": { + "debian:10": "apt-utils", "ubuntu:focal": "apt-utils", "ubuntu:focal::arch64": "apt-utils" }, "automake": { + "debian:10": "automake", "ubuntu:focal": "automake", "ubuntu:focal::arch64": "automake", "centos:7": "automake", "centos:8": "automake" }, "bats": { + "debian:10": "bats", "ubuntu:focal": "bats", "ubuntu:focal::arch64": "bats" }, "build-essential": { + "debian:10": "build-essential", "ubuntu:focal": "build-essential", "ubuntu:focal::arch64": "build-essential", "centos:7": "build-essential" }, "bzip2": { + "debian:10": [ + "bzip2", + "libbz2-dev" + ], "ubuntu:focal": [ "bzip2", "libbz2-dev" @@ -43,16 +52,22 @@ ] }, "clang": { + "debian:10": "clang", "ubuntu:focal": "clang", "ubuntu:focal::arch64": "clang", "centos:7": "clang", "centos:8": "clang" }, "cmake": { + "debian:10": "cmake", "ubuntu:focal": "cmake", "ubuntu:focal::arch64": "cmake" }, "curl": { + "debian:10": [ + "curl", + "libcurl4-openssl-dev" + ], "ubuntu:focal": [ "curl", "libcurl4-openssl-dev" @@ -71,6 +86,7 @@ ] }, "doxygen": { + "debian:10": "doxygen", "ubuntu:focal": "doxygen", "ubuntu:focal::arch64": "doxygen", "centos:7": "doxygen" @@ -79,6 +95,10 @@ "centos:8": "dnf" }, "fuse": { + "debian:10": [ + "fuse", + "libfuse-dev" + ], "ubuntu:focal": [ "fuse", "libfuse-dev" @@ -99,6 +119,10 @@ ] }, "gcc": { + "debian:10": [ + "gcc", + "g++" + ], "ubuntu:focal": [ "gcc", "g++" @@ -113,36 +137,46 @@ ] }, "git": { + "debian:10": "git", "ubuntu:focal": "git", "ubuntu:focal::arch64": "git", "centos:7": "git", "centos:8": "git" }, "gnupg-agent": { + "debian:10": "gnupg-agent", "ubuntu:focal": "gnupg-agent", "ubuntu:focal::arch64": "gnupg-agent" }, "hugo": { + "debian:10": "hugo", "ubuntu:focal": "hugo", "ubuntu:focal::arch64": "hugo" }, "libbcprov-java": { + "debian:10": "libbcprov-java", "ubuntu:focal": "libbcprov-java", "ubuntu:focal::arch64": "libbcprov-java" }, "libtool": { + "debian:10": "libtool", "ubuntu:focal": "libtool", "ubuntu:focal::arch64": "libtool", "centos:7": "libtool", "centos:8": "libtool" }, "openssl": { + "debian:10": "libssl-dev", "ubuntu:focal": "libssl-dev", "ubuntu:focal::arch64": "libssl-dev", "centos:7": "openssl-devel", "centos:8": "openssl-devel" }, "protocol-buffers": { + "debian:10": [ + "libprotobuf-dev", + "libprotoc-dev" + ], "ubuntu:focal": [ "libprotobuf-dev", "libprotoc-dev" @@ -153,17 +187,23 @@ ] }, "sasl": { + "debian:10": "libsasl2-dev", "ubuntu:focal": "libsasl2-dev", "ubuntu:focal::arch64": "libsasl2-dev", "centos:7": "cyrus-sasl-devel", "centos:8": "cyrus-sasl-devel" }, "snappy": { + "debian:10": "libsnappy-dev", "ubuntu:focal": "libsnappy-dev", "ubuntu:focal::arch64": "libsnappy-dev", "centos:7": "snappy-devel" }, "zlib": { + "debian:10": [ + "libzstd-dev", + "zlib1g-dev" + ], "ubuntu:focal": [ "libzstd-dev", "zlib1g-dev" @@ -182,6 +222,7 @@ ] }, "locales": { + "debian:10": "locales", "ubuntu:focal": "locales", "ubuntu:focal::arch64": "locales" }, @@ -193,16 +234,23 @@ "centos:7": "libpmem-devel" }, 
"make": { + "debian:10": "make", "ubuntu:focal": "make", "ubuntu:focal::arch64": "make", "centos:7": "make", "centos:8": "make" }, "maven": { + "debian:10": "maven", "ubuntu:focal": "maven", "ubuntu:focal::arch64": "maven" }, "javascript": { + "debian:10": [ + "nodejs", + "node-yarn", + "npm" + ], "ubuntu:focal": [ "nodejs", "node-yarn", @@ -216,6 +264,7 @@ ] }, "java": { + "debian:10": "openjdk-11-jdk", "ubuntu:focal": [ "openjdk-8-jdk", "openjdk-11-jdk" @@ -226,17 +275,26 @@ ] }, "pinentry-curses": { + "debian:10": "pinentry-curses", "ubuntu:focal": "pinentry-curses", "ubuntu:focal::arch64": "pinentry-curses", "centos:7": "pinentry-curses", "centos:8": "pinentry-curses" }, "pkg-config": { + "debian:10": "pkg-config", "ubuntu:focal": "pkg-config", "ubuntu:focal::arch64": "pkg-config", "centos:8": "pkg-config" }, "python": { + "debian:10": [ + "python3", + "python3-pip", + "python3-pkg-resources", + "python3-setuptools", + "python3-wheel" + ], "ubuntu:focal": [ "python3", "python3-pip", @@ -266,12 +324,14 @@ ] }, "rsync": { + "debian:10": "rsync", "ubuntu:focal": "rsync", "ubuntu:focal::arch64": "rsync", "centos:7": "rsync", "centos:8": "rsync" }, "shellcheck": { + "debian:10": "shellcheck", "ubuntu:focal": "shellcheck", "ubuntu:focal::arch64": "shellcheck" }, @@ -280,22 +340,26 @@ "centos:8": "perl-Digest-SHA" }, "software-properties-common": { + "debian:10": "software-properties-common", "ubuntu:focal": "software-properties-common", "ubuntu:focal::arch64": "software-properties-common" }, "sudo": { + "debian:10": "sudo", "ubuntu:focal": "sudo", "ubuntu:focal::arch64": "sudo", "centos:7": "sudo", "centos:8": "sudo" }, "valgrind": { + "debian:10": "valgrind", "ubuntu:focal": "valgrind", "ubuntu:focal::arch64": "valgrind", "centos:7": "valgrind", "centos:8": "valgrind" }, "yasm": { + "debian:10": "yasm", "ubuntu:focal": "yasm", "ubuntu:focal::arch64": "yasm" } diff --git a/dev-support/docker/pkg-resolver/platforms.json b/dev-support/docker/pkg-resolver/platforms.json index 4861c5dd55c00..93e2a93df4220 100644 --- a/dev-support/docker/pkg-resolver/platforms.json +++ b/dev-support/docker/pkg-resolver/platforms.json @@ -2,5 +2,6 @@ "ubuntu:focal", "ubuntu:focal::arch64", "centos:7", - "centos:8" + "centos:8", + "debian:10" ] \ No newline at end of file From adc436048d2214059e73e545479d1150a94b4d67 Mon Sep 17 00:00:00 2001 From: LeonGao Date: Sat, 19 Jun 2021 15:39:46 -0700 Subject: [PATCH 426/666] HDFS-15842. HDFS mover to emit metrics. 
(#2738) --- .../hdfs/server/balancer/Dispatcher.java | 1 + .../server/balancer/NameNodeConnector.java | 9 +- .../hadoop/hdfs/server/mover/Mover.java | 21 +++++ .../hdfs/server/mover/MoverMetrics.java | 83 +++++++++++++++++++ .../hdfs/server/mover/package-info.java | 27 ++++++ .../hadoop/hdfs/server/mover/TestMover.java | 57 +++++++++++++ 6 files changed, 196 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/MoverMetrics.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/package-info.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index a9538009ddb59..fb91071666a61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -398,6 +398,7 @@ private void dispatch() { LOG.info("Successfully moved " + this); } catch (IOException e) { LOG.warn("Failed to move " + this, e); + nnc.getBlocksFailed().incrementAndGet(); target.getDDatanode().setHasFailure(); // Check that the failure is due to block pinning errors. if (e instanceof BlockPinningException) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index 4d0524276e361..7634eaf7788b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -163,6 +163,7 @@ public static void checkOtherInstanceRunning(boolean toCheck) { private final List targetPaths; private final AtomicLong bytesMoved = new AtomicLong(); private final AtomicLong blocksMoved = new AtomicLong(); + private final AtomicLong blocksFailed = new AtomicLong(); private final int maxNotChangedIterations; private int notChangedIterations = 0; @@ -230,14 +231,18 @@ public String getBlockpoolID() { return blockpoolID; } - AtomicLong getBytesMoved() { + public AtomicLong getBytesMoved() { return bytesMoved; } - AtomicLong getBlocksMoved() { + public AtomicLong getBlocksMoved() { return blocksMoved; } + public AtomicLong getBlocksFailed() { + return blocksFailed; + } + public void addBytesMoved(long numBytes) { bytesMoved.addAndGet(numBytes); blocksMoved.incrementAndGet(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 8b9e9ed742050..dd7abf6a5890c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.net.NetUtils; import 
org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.SecurityUtil; @@ -118,6 +120,8 @@ private List getTargetStorages(StorageType t) { private final int retryMaxAttempts; private final AtomicInteger retryCount; private final Map> excludedPinnedBlocks; + private final MoverMetrics metrics; + private final NameNodeConnector nnc; private final BlockStoragePolicy[] blockStoragePolicies; @@ -155,6 +159,8 @@ Collections. emptySet(), movedWinWidth, moverThreads, 0, this.blockStoragePolicies = new BlockStoragePolicy[1 << BlockStoragePolicySuite.ID_BIT_LENGTH]; this.excludedPinnedBlocks = excludedPinnedBlocks; + this.nnc = nnc; + this.metrics = MoverMetrics.create(this); } void init() throws IOException { @@ -196,6 +202,10 @@ private ExitStatus run() { } } + public NameNodeConnector getNnc() { + return nnc; + } + DBlock newDBlock(LocatedBlock lb, List locations, ErasureCodingPolicy ecPolicy) { Block blk = lb.getBlock().getLocalBlock(); @@ -296,6 +306,7 @@ private boolean isSnapshotPathInCurrent(String path) throws IOException { * round */ private Result processNamespace() throws IOException { + metrics.setProcessingNamespace(true); getSnapshottableDirs(); Result result = new Result(); for (Path target : targetPaths) { @@ -322,6 +333,7 @@ private Result processNamespace() throws IOException { retryCount.set(0); } result.updateHasRemaining(hasFailed); + metrics.setProcessingNamespace(false); return result; } @@ -374,6 +386,7 @@ private void processRecursively(String parent, HdfsFileStatus status, // the full path is a snapshot path but it is also included in the // current directory tree, thus ignore it. processFile(fullPath, (HdfsLocatedFileStatus) status, result); + metrics.incrFilesProcessed(); } } catch (IOException e) { LOG.warn("Failed to check the status of " + parent @@ -521,6 +534,7 @@ boolean chooseTargetInSameNode(DBlock db, Source source, final PendingMove pm = source.addPendingMove(db, target); if (pm != null) { dispatcher.executePendingMove(pm); + metrics.incrBlocksScheduled(); return true; } } @@ -539,6 +553,7 @@ boolean chooseTarget(DBlock db, Source source, final PendingMove pm = source.addPendingMove(db, target); if (pm != null) { dispatcher.executePendingMove(pm); + metrics.incrBlocksScheduled(); return true; } } @@ -650,6 +665,11 @@ static int run(Map> namenodes, Configuration conf) Map> excludedPinnedBlocks = new HashMap<>(); LOG.info("namenodes = " + namenodes); + DefaultMetricsSystem.initialize("Mover"); + JvmMetrics.create("Mover", + conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY), + DefaultMetricsSystem.instance()); + checkKeytabAndInit(conf); List connectors = Collections.emptyList(); try { @@ -818,6 +838,7 @@ public int run(String[] args) throws Exception { System.out.println(e + ". 
Exiting ..."); return ExitStatus.ILLEGAL_ARGUMENTS.getExitCode(); } finally { + DefaultMetricsSystem.shutdown(); System.out.format("%-24s ", DateFormat.getDateTimeInstance().format(new Date())); System.out.println("Mover took " + StringUtils.formatTime(Time.monotonicNow()-startTime)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/MoverMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/MoverMetrics.java new file mode 100644 index 0000000000000..846e6f664f79a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/MoverMetrics.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.mover; + +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; + +/** + * Metrics for HDFS Mover of a blockpool. + */ +@Metrics(about="Mover metrics", context="dfs") +final class MoverMetrics { + + private final Mover mover; + + @Metric("If mover is processing namespace.") + private MutableGaugeInt processingNamespace; + + @Metric("Number of blocks being scheduled.") + private MutableCounterLong blocksScheduled; + + @Metric("Number of files being processed.") + private MutableCounterLong filesProcessed; + + private MoverMetrics(Mover m) { + this.mover = m; + } + + public static MoverMetrics create(Mover mover) { + MoverMetrics m = new MoverMetrics(mover); + return DefaultMetricsSystem.instance().register( + m.getName(), null, m); + } + + String getName() { + return "Mover-" + mover.getNnc().getBlockpoolID(); + } + + @Metric("Bytes that already moved by mover.") + public long getBytesMoved() { + return mover.getNnc().getBytesMoved().get(); + } + + @Metric("Number of blocks that successfully moved by mover.") + public long getBlocksMoved() { + return mover.getNnc().getBlocksMoved().get(); + } + + @Metric("Number of blocks that failed moved by mover.") + public long getBlocksFailed() { + return mover.getNnc().getBlocksFailed().get(); + } + + void setProcessingNamespace(boolean processingNamespace) { + this.processingNamespace.set(processingNamespace ? 
1 : 0); + } + + void incrBlocksScheduled() { + this.blocksScheduled.incr(); + } + + void incrFilesProcessed() { + this.filesProcessed.incr(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/package-info.java new file mode 100644 index 0000000000000..92db7b7a9c699 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/package-info.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Mover is a data migration tool for tiered storage. + * It scans provided paths in HDFS to check + * if the block placement satisfies the storage policy. + * For the blocks violating the storage policy, + * it moves the replicas to a different storage type + * in order to fulfill the storage policy requirement. + */ +package org.apache.hadoop.hdfs.server.mover; \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index f428b2c2a8fc3..3cec73987b968 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -36,6 +36,8 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import java.io.File; import java.io.IOException; @@ -86,12 +88,15 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.ToolRunner; import org.junit.Assert; import org.junit.Test; @@ -1235,6 +1240,58 @@ public void testMoverWhenStoragePolicyUnset() throws Exception { } } + @Test(timeout=100000) + public void testMoverMetrics() throws Exception { + long blockSize = 
10*1024*1024; + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + conf.setInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, 1); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); + + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(2) + .storageTypes( + new StorageType[][] {{StorageType.DISK, StorageType.DISK}, + {StorageType.ARCHIVE, StorageType.ARCHIVE}}) + .build(); + + cluster.waitActive(); + final DistributedFileSystem fs = cluster.getFileSystem(); + + final String file = "/testMaxIterationTime.dat"; + final Path path = new Path(file); + short repFactor = 1; + int seed = 0xFAFAFA; + // write to DISK + DFSTestUtil.createFile(fs, path, 4L * blockSize, repFactor, seed); + + // move to ARCHIVE + fs.setStoragePolicy(new Path(file), "COLD"); + + Map> nnWithPath = new HashMap<>(); + List paths = new ArrayList<>(); + paths.add(path); + nnWithPath + .put(DFSUtil.getInternalNsRpcUris(conf).iterator().next(), paths); + + Mover.run(nnWithPath, conf); + + final String moverMetricsName = "Mover-" + + cluster.getNameNode(0).getNamesystem().getBlockPoolId(); + MetricsSource moverMetrics = + DefaultMetricsSystem.instance().getSource(moverMetricsName); + assertNotNull(moverMetrics); + + MetricsRecordBuilder rb = MetricsAsserts.getMetrics(moverMetricsName); + // Check metrics + assertEquals(4, MetricsAsserts.getLongCounter("BlocksScheduled", rb)); + assertEquals(1, MetricsAsserts.getLongCounter("FilesProcessed", rb)); + assertEquals(41943040, MetricsAsserts.getLongGauge("BytesMoved", rb)); + assertEquals(4, MetricsAsserts.getLongGauge("BlocksMoved", rb)); + assertEquals(0, MetricsAsserts.getLongGauge("BlocksFailed", rb)); + } + private void createFileWithFavoredDatanodes(final Configuration conf, final MiniDFSCluster cluster, final DistributedFileSystem dfs) throws IOException { From d0f7bb6d28017cd00eafe626bff4b9ac5e786daf Mon Sep 17 00:00:00 2001 From: litao Date: Sun, 20 Jun 2021 12:50:52 +0800 Subject: [PATCH 427/666] HDFS-16078. Remove unused parameters for DatanodeManager.handleLifeline() (#3119). Contributed by tomscut. 
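The MoverMetrics class and the testMoverMetrics assertions above follow Hadoop's metrics2 annotation pattern: a source class annotated with @Metrics exposes counters and gauges either as mutable fields or as annotated getters, and becomes queryable once registered with the DefaultMetricsSystem. A stripped-down sketch of that pattern, with an illustrative source name and metric and without the Mover wiring:

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Minimal metrics2 source, mirroring the shape of MoverMetrics.
@Metrics(about = "Example metrics", context = "dfs")
public final class ExampleMetrics {

  @Metric("Number of items processed.")
  private MutableCounterLong itemsProcessed;

  private ExampleMetrics() {
  }

  public static ExampleMetrics create() {
    // Registration instruments the annotated fields and makes the source
    // retrievable by name from the metrics system.
    return DefaultMetricsSystem.instance().register(
        "ExampleMetrics", null, new ExampleMetrics());
  }

  void incrItemsProcessed() {
    itemsProcessed.incr();
  }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("ExampleApp");
    ExampleMetrics metrics = create();
    metrics.incrItemsProcessed();
    DefaultMetricsSystem.shutdown();
  }
}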
Signed-off-by: Wei-Chiu Chuang Signed-off-by: Ayush Saxena --- .../hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 6 ++---- .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 +-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 67811c98caf5c..2e7b338d8578c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1876,18 +1876,16 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, * * @param nodeReg registration info for DataNode sending the lifeline * @param reports storage reports from DataNode - * @param blockPoolId block pool ID * @param cacheCapacity cache capacity at DataNode * @param cacheUsed cache used at DataNode * @param xceiverCount estimated count of transfer threads running at DataNode - * @param maxTransfers count of transfers running at DataNode * @param failedVolumes count of failed volumes at DataNode * @param volumeFailureSummary info on failed volumes at DataNode * @throws IOException if there is an error */ public void handleLifeline(DatanodeRegistration nodeReg, - StorageReport[] reports, String blockPoolId, long cacheCapacity, - long cacheUsed, int xceiverCount, int maxTransfers, int failedVolumes, + StorageReport[] reports, long cacheCapacity, + long cacheUsed, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Received handleLifeline from nodeReg = " + nodeReg); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 32da3c793c35f..7ccaae9773e1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4416,9 +4416,8 @@ void handleLifeline(DatanodeRegistration nodeReg, StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress, int failedVolumes, VolumeFailureSummary volumeFailureSummary) throws IOException { - int maxTransfer = blockManager.getMaxReplicationStreams() - xmitsInProgress; blockManager.getDatanodeManager().handleLifeline(nodeReg, reports, - getBlockPoolId(), cacheCapacity, cacheUsed, xceiverCount, maxTransfer, + cacheCapacity, cacheUsed, xceiverCount, failedVolumes, volumeFailureSummary); } From 1aed11219b4ccb8a2c1255fd624020bd84eda6c6 Mon Sep 17 00:00:00 2001 From: Ahmed Hussein <50450311+amahussein@users.noreply.github.com> Date: Sat, 19 Jun 2021 23:52:47 -0500 Subject: [PATCH 428/666] HDFS-16061. DFTestUtil.waitReplication can produce false positives (#3095). Contributed by Ahmed Hussein. 
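The HDFS-16061 change (diff below) reworks DFSTestUtil.waitReplication into a bounded poll: check the block locations, sleep before each retry, and throw TimeoutException only once the attempt budget is exhausted. The general shape of such a wait loop, sketched independently of any HDFS types:

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

// Generic poll-until-true helper in the spirit of the reworked waitReplication.
public final class WaitFor {
  private WaitFor() {
  }

  public static void waitFor(BooleanSupplier condition, long intervalMs, int maxAttempts)
      throws InterruptedException, TimeoutException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (attempt > 0) {
        Thread.sleep(intervalMs);   // back off before re-checking, as the patch does
      }
      if (condition.getAsBoolean()) {
        return;                     // desired state reached
      }
    }
    throw new TimeoutException("Condition not met after " + maxAttempts + " attempts");
  }

  public static void main(String[] args) throws Exception {
    long deadline = System.currentTimeMillis() + 200;
    waitFor(() -> System.currentTimeMillis() >= deadline, 50, 40);
    System.out.println("condition satisfied");
  }
}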
Reviewed-by: Jim Brennan Signed-off-by: Ayush Saxena --- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 51 +++++++++++-------- .../server/balancer/TestBalancerRPCDelay.java | 8 ++- 2 files changed, 35 insertions(+), 24 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 74082a21707c7..d813375e2748f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -794,41 +794,48 @@ public String[] getFileNames(String topDir) { /** * Wait for the given file to reach the given replication factor. - * @throws TimeoutException if we fail to sufficiently replicate the file + * + * @param fs the defined filesystem. + * @param fileName being written. + * @param replFactor desired replication + * @throws IOException getting block locations + * @throws InterruptedException during sleep + * @throws TimeoutException if 40 seconds passed before reaching the desired + * replication. */ - public static void waitReplication(FileSystem fs, Path fileName, short replFactor) + public static void waitReplication(FileSystem fs, Path fileName, + short replFactor) throws IOException, InterruptedException, TimeoutException { boolean correctReplFactor; - final int ATTEMPTS = 40; - int count = 0; - + int attempt = 0; do { correctReplFactor = true; + if (attempt++ > 0) { + Thread.sleep(1000); + } BlockLocation locs[] = fs.getFileBlockLocations( - fs.getFileStatus(fileName), 0, Long.MAX_VALUE); - count++; - for (int j = 0; j < locs.length; j++) { - String[] hostnames = locs[j].getNames(); + fs.getFileStatus(fileName), 0, Long.MAX_VALUE); + for (int currLoc = 0; currLoc < locs.length; currLoc++) { + String[] hostnames = locs[currLoc].getNames(); if (hostnames.length != replFactor) { + LOG.info( + "Block {} of file {} has replication factor {} " + + "(desired {}); locations: {}", + currLoc, fileName, hostnames.length, replFactor, + Joiner.on(' ').join(hostnames)); correctReplFactor = false; - System.out.println("Block " + j + " of file " + fileName - + " has replication factor " + hostnames.length - + " (desired " + replFactor + "); locations " - + Joiner.on(' ').join(hostnames)); - Thread.sleep(1000); break; } } - if (correctReplFactor) { - System.out.println("All blocks of file " + fileName - + " verified to have replication factor " + replFactor); - } - } while (!correctReplFactor && count < ATTEMPTS); + } while (!correctReplFactor && attempt < 40); - if (count == ATTEMPTS) { - throw new TimeoutException("Timed out waiting for " + fileName + - " to reach " + replFactor + " replicas"); + if (!correctReplFactor) { + throw new TimeoutException("Timed out waiting for file [" + + fileName + "] to reach [" + replFactor + "] replicas"); } + + LOG.info("All blocks of file {} verified to have replication factor {}", + fileName, replFactor); } /** delete directory and everything underneath it.*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java index 79c7f87d4e70d..9752d65982d6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java @@ -20,13 +20,17 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.junit.After; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; /** * The Balancer ensures that it disperses RPCs to the NameNode * in order to avoid NN's RPC queue saturation. */ public class TestBalancerRPCDelay { + @Rule + public Timeout globalTimeout = Timeout.seconds(100); private TestBalancer testBalancer; @@ -43,12 +47,12 @@ public void teardown() throws Exception { } } - @Test(timeout=100000) + @Test public void testBalancerRPCDelayQps3() throws Exception { testBalancer.testBalancerRPCDelay(3); } - @Test(timeout=100000) + @Test public void testBalancerRPCDelayQpsDefault() throws Exception { testBalancer.testBalancerRPCDelay( DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT); From e643684c56261cc880598f9af815b7df910efcea Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Sun, 20 Jun 2021 17:19:05 +0530 Subject: [PATCH 429/666] HDFS-16080. RBF: Invoking method in all locations should break the loop after successful result (#3121). Contributed by Viraj Jasani. Signed-off-by: Ayush Saxena --- .../server/federation/router/RouterRpcClient.java | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index bc6d5bed6e0e7..a4833ff06c286 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -1129,25 +1129,17 @@ private static boolean isExpectedValue(Object expectedValue, Object value) { * Invoke method in all locations and return success if any succeeds. * * @param The type of the remote location. - * @param The type of the remote method return. * @param locations List of remote locations to call concurrently. * @param method The remote method and parameters to invoke. * @return If the call succeeds in any location. * @throws IOException If any of the calls return an exception. */ - public boolean invokeAll( + public boolean invokeAll( final Collection locations, final RemoteMethod method) - throws IOException { - boolean anyResult = false; + throws IOException { Map results = invokeConcurrent(locations, method, false, false, Boolean.class); - for (Boolean value : results.values()) { - boolean result = value.booleanValue(); - if (result) { - anyResult = true; - } - } - return anyResult; + return results.containsValue(true); } /** From d9f886874331ea4ef605598c0128bd37cf8aa06f Mon Sep 17 00:00:00 2001 From: litao Date: Sun, 20 Jun 2021 19:53:11 +0800 Subject: [PATCH 430/666] HDFS-16079. Improve the block state change log (#3120). Contributed by tomscut. 
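The HDFS-16080 change above collapses a hand-written "any result is true" loop over the per-location results map into a single Map.containsValue(true) call, which also stops scanning at the first match. A tiny sketch of the equivalence, using a hypothetical result map rather than router code:

import java.util.Map;

// The two checks below compute the same answer for a Map<T, Boolean> of results.
public class AnySuccessSketch {
  public static void main(String[] args) {
    Map<String, Boolean> results = Map.of("ns0", false, "ns1", true);

    // Hand-written loop, as the old invokeAll did.
    boolean anyResult = false;
    for (Boolean value : results.values()) {
      if (value) {
        anyResult = true;
      }
    }

    // Equivalent single call used by the patch.
    System.out.println(anyResult + " " + results.containsValue(true));  // true true
  }
}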
Signed-off-by: He Xiaoqiao Signed-off-by: Ayush Saxena --- .../hdfs/server/blockmanagement/DatanodeAdminManager.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index 70ae44a38092d..c500ab5a9bf42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -377,6 +377,8 @@ protected void logBlockReplicationInfo(BlockInfo block, + ", maintenance replicas: " + num.maintenanceReplicas() + ", live entering maintenance replicas: " + num.liveEnteringMaintenanceReplicas() + + ", replicas on stale nodes: " + num.replicasOnStaleNodes() + + ", readonly replicas: " + num.readOnlyReplicas() + ", excess replicas: " + num.excessReplicas() + ", Is Open File: " + bc.isUnderConstruction() + ", Datanodes having this block: " + nodeList + ", Current Datanode: " From 8b42eb2e100aaa05688a88fb93b6f6561e7128da Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 21 Jun 2021 06:55:12 +0530 Subject: [PATCH 431/666] HDFS-16075. Use empty array constants present in StorageType and DatanodeInfo to avoid creating redundant objects (#3115) Reviewed-by: Hui Fei --- .../main/java/org/apache/hadoop/fs/BlockLocation.java | 2 +- .../java/org/apache/hadoop/fs/TestBlockLocation.java | 2 +- .../main/java/org/apache/hadoop/hdfs/DataStreamer.java | 2 +- .../org/apache/hadoop/hdfs/protocol/LocatedBlocks.java | 2 +- .../hadoop/hdfs/server/federation/MockNamenode.java | 2 +- .../hdfs/server/federation/router/TestRouterRpc.java | 2 +- .../federation/router/TestRouterRpcMultiDestination.java | 2 +- .../hadoop/hdfs/server/namenode/FSDirAppendOp.java | 2 +- .../org/apache/hadoop/hdfs/TestDFSClientRetries.java | 2 +- .../java/org/apache/hadoop/hdfs/TestDFSInputStream.java | 4 ++-- .../java/org/apache/hadoop/hdfs/TestLeaseRecovery.java | 6 +++--- .../apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java | 2 +- .../apache/hadoop/hdfs/protocol/TestLocatedBlock.java | 2 +- .../hadoop/hdfs/security/token/block/TestBlockToken.java | 2 +- .../hdfs/server/blockmanagement/TestDatanodeManager.java | 9 ++++++--- .../server/datanode/TestDataXceiverLazyPersistHint.java | 4 ++-- .../hadoop/hdfs/server/datanode/TestDiskError.java | 2 +- .../TestBlockPlacementPolicyRackFaultTolerant.java | 4 ++-- .../hadoop/hdfs/server/namenode/ha/TestHASafeMode.java | 2 +- 19 files changed, 29 insertions(+), 26 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java index c6dde52d83dd1..29358dd7d1086 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java @@ -74,7 +74,7 @@ public class BlockLocation implements Serializable { private static final String[] EMPTY_STR_ARRAY = new String[0]; private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY = - new StorageType[0]; + StorageType.EMPTY_ARRAY; /** * Default Constructor. 
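[Reviewer note, not part of the patch] The HDFS-16075 hunks above swap ad-hoc allocations such as new StorageType[0] and new DatanodeInfo[0] for shared constants. A zero-length array is effectively immutable, so a single instance can be handed to every caller, and Collection#toArray(T[]) still allocates a correctly sized result when the collection is non-empty. A small hedged sketch of the idiom; EMPTY_ARRAY here is a stand-in constant, not the Hadoop field itself:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class EmptyArraySketch {
      // A single shared zero-length instance; zero-length arrays are
      // effectively immutable, so reusing the same object is safe.
      static final String[] EMPTY_ARRAY = new String[0];

      public static void main(String[] args) {
        List<String> hosts = new ArrayList<>(Arrays.asList("dn1", "dn2"));
        // Passing the shared constant avoids a throwaway new String[0] per
        // call; toArray still allocates a correctly sized array here.
        String[] asArray = hosts.toArray(EMPTY_ARRAY);
        System.out.println(asArray.length);        // prints: 2

        // For an empty ArrayList the shared instance itself comes back.
        String[] none = new ArrayList<String>().toArray(EMPTY_ARRAY);
        System.out.println(none == EMPTY_ARRAY);   // prints: true
      }
    }
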
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java index 8569ea7cf781d..72e850b1313d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java @@ -27,7 +27,7 @@ public class TestBlockLocation { private static final String[] EMPTY_STR_ARRAY = new String[0]; private static final StorageType[] EMPTY_STORAGE_TYPE_ARRAY = - new StorageType[0]; + StorageType.EMPTY_ARRAY; private static void checkBlockLocation(final BlockLocation loc) throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index e04268eddce79..4b5f3c33982ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -1683,7 +1683,7 @@ public void updatePipeline(long newGS) throws IOException { DatanodeInfo[] getExcludedNodes() { return excludedNodes.getAllPresent(excludedNodes.asMap().keySet()) - .keySet().toArray(new DatanodeInfo[0]); + .keySet().toArray(DatanodeInfo.EMPTY_ARRAY); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java index baf59ce61367b..1f5b85e315f8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java @@ -131,7 +131,7 @@ public ErasureCodingPolicy getErasureCodingPolicy() { public int findBlock(long offset) { // create fake block of size 0 as a key LocatedBlock key = new LocatedBlock( - new ExtendedBlock(), new DatanodeInfo[0]); + new ExtendedBlock(), DatanodeInfo.EMPTY_ARRAY); key.setStartOffset(offset); key.getBlock().setNumBytes(1); Comparator comp = diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java index f908065384193..a4755c20fcae4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java @@ -535,7 +535,7 @@ private static HdfsFileStatus getMockHdfsFileStatus( */ private static LocatedBlock getMockLocatedBlock(final String nsId) { LocatedBlock lb = mock(LocatedBlock.class); - when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]); + when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY); DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111, 1112, 1113, 1114); DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 
4f772cc4dc89c..21329c8142331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -1241,7 +1241,7 @@ public void testProxyGetAdditionalDatanode() newRouterFile, clientName, null, null, status.getFileId(), null, null); - DatanodeInfo[] exclusions = new DatanodeInfo[0]; + DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY; LocatedBlock newBlock = routerProtocol.getAdditionalDatanode( newRouterFile, status.getFileId(), block.getBlock(), block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 4f112ba9b7294..e50464c0be7b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -314,7 +314,7 @@ testPath, new FsPermission("777"), clientName, assertEquals(1, proxyNumAddBlock2 - proxyNumAddBlock); // Get additionalDatanode via router and block is not null. - DatanodeInfo[] exclusions = new DatanodeInfo[0]; + DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY; LocatedBlock newBlock = clientProtocol.getAdditionalDatanode( testPath, status.getFileId(), blockTwo.getBlock(), blockTwo.getLocations(), blockTwo.getStorageIDs(), exclusions, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java index 2586a257beff3..7e90d4bafa8bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java @@ -209,7 +209,7 @@ static LocatedBlock prepareFileForAppend(final FSNamesystem fsn, BlockInfo lastBlock = file.getLastBlock(); if (lastBlock != null) { ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock); - ret = new LocatedBlock(blk, new DatanodeInfo[0]); + ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 30115efc384e5..970003b0e58cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -883,7 +883,7 @@ public void testClientDNProtocolTimeout() throws IOException { DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); - LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); + LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY); ClientDatanodeProtocol proxy = null; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java index f2d580576cfa3..2f9e0d319cdba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java @@ -242,7 +242,7 @@ public void testReadWithPreferredCachingReplica() throws IOException { DFSInputStream dfsInputStream = (DFSInputStream) fs.open(filePath).getWrappedStream(); LocatedBlock lb = mock(LocatedBlock.class); - when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]); + when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY); DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111, 1112, 1113, 1114); DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); @@ -271,7 +271,7 @@ public void testReadWithoutPreferredCachingReplica() throws IOException { DFSInputStream dfsInputStream = (DFSInputStream) fs.open(filePath).getWrappedStream(); LocatedBlock lb = mock(LocatedBlock.class); - when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]); + when(lb.getCachedLocations()).thenReturn(DatanodeInfo.EMPTY_ARRAY); DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111, 1112, 1113, 1114); DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index ca3065088c40b..e2c956ecd277f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -461,8 +461,8 @@ public void testAbortedRecovery() throws Exception { // Add a block to the file ExtendedBlock block = client.getNamenode().addBlock( - file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(), - new String[0], null).getBlock(); + file, client.clientName, null, DatanodeInfo.EMPTY_ARRAY, + stat.getFileId(), new String[0], null).getBlock(); // update the pipeline to get a new genstamp. 
ExtendedBlock updatedBlock = client.getNamenode() @@ -578,7 +578,7 @@ private void createCommittedNotCompleteFile(DFSClient client, String file, // Add a block to the file LocatedBlock blk = client.getNamenode() .addBlock(file, client.clientName, null, - new DatanodeInfo[0], stat.getFileId(), new String[0], null); + DatanodeInfo.EMPTY_ARRAY, stat.getFileId(), new String[0], null); ExtendedBlock finalBlock = blk.getBlock(); if (bytesToWrite != null) { // Here we create a output stream and then abort it so the block gets diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 925f93648d49c..5015722c61f7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -65,7 +65,7 @@ public void testDefaultPolicy() throws Exception { final DatanodeInfo[] infos = new DatanodeInfo[5]; final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][]; - datanodes[0] = new DatanodeInfo[0]; + datanodes[0] = DatanodeInfo.EMPTY_ARRAY; for(int i = 0; i < infos.length; ) { infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i); i++; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java index 3546c89938f03..33b5bd90b2a25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java @@ -31,7 +31,7 @@ public class TestLocatedBlock { @Test(timeout = 10000) public void testAddCachedLocWhenEmpty() { - DatanodeInfo[] ds = new DatanodeInfo[0]; + DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY; ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1); LocatedBlock l1 = new LocatedBlock(b1, ds); DatanodeDescriptor dn = new DatanodeDescriptor( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 524656aa76341..d08276b06983b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -391,7 +391,7 @@ private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception { DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); - LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); + LocatedBlock fakeBlock = new LocatedBlock(b, DatanodeInfo.EMPTY_ARRAY); fakeBlock.setBlockToken(token); // Create another RPC proxy with the same configuration - this will never diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java index 4fbfbcfa729e2..5f5452ac16d59 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java @@ -392,7 +392,8 @@ public void HelperFunction(String scriptFileName, int providedStorages) storageTypesList.add(StorageType.PROVIDED); } - StorageType[] storageTypes= storageTypesList.toArray(new StorageType[0]); + StorageType[] storageTypes = storageTypesList.toArray( + StorageType.EMPTY_ARRAY); for (int i = 0; i < totalDNs; i++) { // register new datanode @@ -694,7 +695,8 @@ public void testGetBlockLocationConsiderStorageType() List storageTypesList = new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK, StorageType.SSD, StorageType.DEFAULT, StorageType.SSD)); - StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]); + StorageType[] storageTypes = storageTypesList.toArray( + StorageType.EMPTY_ARRAY); for (int i = 0; i < totalDNs; i++) { // Register new datanode. @@ -779,7 +781,8 @@ public void testGetBlockLocationConsiderStorageTypeAndLoad() List storageTypesList = new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK, StorageType.DEFAULT, StorageType.SSD, StorageType.SSD)); - StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]); + StorageType[] storageTypes = storageTypesList.toArray( + StorageType.EMPTY_ARRAY); for (int i = 0; i < totalDNs; i++) { // Register new datanode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java index dab33b2c037dc..611360d6cb15b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java @@ -127,8 +127,8 @@ private void issueWriteBlockCall(DataXceiver xceiver, boolean lazyPersist) StorageType.RAM_DISK, null, "Dummy-Client", - new DatanodeInfo[0], - new StorageType[0], + DatanodeInfo.EMPTY_ARRAY, + StorageType.EMPTY_ARRAY, mock(DatanodeInfo.class), BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, 0, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java index 64c5a5b93aa48..53be71f9a34c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @@ -165,7 +165,7 @@ public void testReplicationError() throws Exception { DataChecksum.Type.CRC32, 512); new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT, BlockTokenSecretManager.DUMMY_TOKEN, "", - new DatanodeInfo[0], new StorageType[0], null, + DatanodeInfo.EMPTY_ARRAY, StorageType.EMPTY_ARRAY, null, BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L, checksum, CachingStrategy.newDefaultStrategy(), false, false, null, null, new String[0]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java index 9825f13cea5c9..88b7d2bf7f23d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java @@ -119,7 +119,7 @@ private void doTestChooseTargetNormalCase() throws Exception { LocatedBlock additionalLocatedBlock = nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(), locatedBlock.getBlock(), locatedBlock.getLocations(), - locatedBlock.getStorageIDs(), new DatanodeInfo[0], + locatedBlock.getStorageIDs(), DatanodeInfo.EMPTY_ARRAY, additionalReplication, clientMachine); doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock); } @@ -159,7 +159,7 @@ private void doTestChooseTargetSpecialCase() throws Exception { LocatedBlock additionalLocatedBlock = nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(), locatedBlock.getBlock(), partLocs, - partStorageIDs, new DatanodeInfo[0], + partStorageIDs, DatanodeInfo.EMPTY_ARRAY, j, clientMachine); doTestLocatedBlock(i + j, additionalLocatedBlock); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index affa348c993df..537e6a34bd0b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -859,7 +859,7 @@ public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception { pathString, client.getClientName(), new ExtendedBlock(previousBlock), - new DatanodeInfo[0], + DatanodeInfo.EMPTY_ARRAY, DFSClientAdapter.getFileId((DFSOutputStream) create .getWrappedStream()), null, null); cluster.restartNameNode(0, true); From 978641472db18cd9e0648724d820d0bec742228a Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 21 Jun 2021 13:16:06 +0530 Subject: [PATCH 432/666] MAPREDUCE-7354. 
Use empty array constant present in TaskCompletionEvent to avoid creating redundant objects (#3123) Reviewed-by: Hui Fei Reviewed-by: Akira Ajisaka --- .../hadoop/mapreduce/v2/app/job/impl/JobImpl.java | 2 +- .../java/org/apache/hadoop/mapreduce/TestJob.java | 2 +- .../hadoop/mapreduce/TestJobMonitorAndPrint.java | 13 +++---------- .../apache/hadoop/mapreduce/v2/hs/UnparsedJob.java | 2 +- 4 files changed, 6 insertions(+), 13 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index 5489f52f6ee0b..290b4a240f247 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -146,7 +146,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0]; private static final TaskCompletionEvent[] - EMPTY_TASK_COMPLETION_EVENTS = new TaskCompletionEvent[0]; + EMPTY_TASK_COMPLETION_EVENTS = TaskCompletionEvent.EMPTY_ARRAY; private static final Logger LOG = LoggerFactory.getLogger(JobImpl.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java index 60f390f44650d..d1ebe2f982d5f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJob.java @@ -50,7 +50,7 @@ public void testJobToString() throws IOException, InterruptedException { when(client.getTaskReports(jobid, TaskType.REDUCE)).thenReturn( new TaskReport[0]); when(client.getTaskCompletionEvents(jobid, 0, 10)).thenReturn( - new TaskCompletionEvent[0]); + TaskCompletionEvent.EMPTY_ARRAY); Job job = Job.getInstance(cluster, status, new JobConf()); Assert.assertNotNull(job.toString()); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java index 779a031c66893..e91b4c1e85498 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java @@ -45,7 +45,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.WriterAppender; -import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; /** @@ -81,15 +80,9 @@ public void testJobMonitorAndPrint() throws Exception { 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", 
"tmp-jobfile", "tmp-url", true); - doAnswer( - new Answer() { - @Override - public TaskCompletionEvent[] answer(InvocationOnMock invocation) - throws Throwable { - return new TaskCompletionEvent[0]; - } - } - ).when(job).getTaskCompletionEvents(anyInt(), anyInt()); + doAnswer((Answer) invocation -> + TaskCompletionEvent.EMPTY_ARRAY).when(job) + .getTaskCompletionEvents(anyInt(), anyInt()); doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java index ecc494552831c..e1f97a8899dba 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java @@ -183,7 +183,7 @@ public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents( @Override public TaskCompletionEvent[] getMapAttemptCompletionEvents( int startIndex, int maxEvents) { - return new TaskCompletionEvent[0]; + return TaskCompletionEvent.EMPTY_ARRAY; } @Override From e2e5fa9880b9b507b1b7850c439a913b06c0f1c3 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Tue, 22 Jun 2021 00:32:55 +0800 Subject: [PATCH 433/666] HDFS-14575. LeaseRenewer#daemon threads leak in DFSClient. Contributed by Renukaprasad C. Co-authored-by: Tao Yang Reviewed-by: He Xiaoqiao Reviewed-by: Wei-Chiu Chuang --- .../org/apache/hadoop/hdfs/DFSClient.java | 10 ++- .../hadoop/hdfs/client/impl/LeaseRenewer.java | 34 ++++++-- .../hdfs/client/impl/TestLeaseRenewer.java | 87 +++++++++++++++++++ 3 files changed, 125 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 3fa4dd06fec52..56adc5c065eee 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -505,7 +505,15 @@ private void beginFileLease(final long inodeId, final DFSOutputStream out) throws IOException { synchronized (filesBeingWritten) { putFileBeingWritten(inodeId, out); - getLeaseRenewer().put(this); + LeaseRenewer renewer = getLeaseRenewer(); + boolean result = renewer.put(this); + if (!result) { + // Existing LeaseRenewer cannot add another Daemon, so remove existing + // and add new one. 
+ LeaseRenewer.remove(renewer); + renewer = getLeaseRenewer(); + renewer.put(this); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java index d108af987cc8d..6b4c89945445f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java @@ -26,6 +26,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -79,6 +80,8 @@ public class LeaseRenewer { private static long leaseRenewerGraceDefault = 60*1000L; static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L; + private AtomicBoolean isLSRunning = new AtomicBoolean(false); + /** Get a {@link LeaseRenewer} instance */ public static LeaseRenewer getInstance(final String authority, final UserGroupInformation ugi, final DFSClient dfsc) { @@ -87,6 +90,15 @@ public static LeaseRenewer getInstance(final String authority, return r; } + /** + * Remove the given renewer from the Factory. + * Subsequent call will receive new {@link LeaseRenewer} instance. + * @param renewer Instance to be cleared from Factory + */ + public static void remove(LeaseRenewer renewer) { + Factory.INSTANCE.remove(renewer); + } + /** * A factory for sharing {@link LeaseRenewer} objects * among {@link DFSClient} instances @@ -156,6 +168,9 @@ private synchronized void remove(final LeaseRenewer r) { final LeaseRenewer stored = renewers.get(r.factorykey); //Since a renewer may expire, the stored renewer can be different. if (r == stored) { + // Expire LeaseRenewer daemon thread as soon as possible. + r.clearClients(); + r.setEmptyTime(0); renewers.remove(r.factorykey); } } @@ -241,6 +256,10 @@ private synchronized void addClient(final DFSClient dfsc) { } } + private synchronized void clearClients() { + dfsclients.clear(); + } + private synchronized boolean clientsRunning() { for(Iterator i = dfsclients.iterator(); i.hasNext(); ) { if (!i.next().isClientRunning()) { @@ -292,11 +311,18 @@ private synchronized boolean isRenewerExpired() { && Time.monotonicNow() - emptyTime > gracePeriod; } - public synchronized void put(final DFSClient dfsc) { + public synchronized boolean put(final DFSClient dfsc) { if (dfsc.isClientRunning()) { if (!isRunning() || isRenewerExpired()) { - //start a new deamon with a new id. + // Start a new daemon with a new id. final int id = ++currentId; + if (isLSRunning.get()) { + // Not allowed to add multiple daemons into LeaseRenewer, let client + // create new LR and continue to acquire lease. + return false; + } + isLSRunning.getAndSet(true); + daemon = new Daemon(new Runnable() { @Override public void run() { @@ -328,6 +354,7 @@ public String toString() { } emptyTime = Long.MAX_VALUE; } + return true; } @VisibleForTesting @@ -426,9 +453,6 @@ private void run(final int id) throws InterruptedException { synchronized (this) { DFSClientFaultInjector.get().delayWhenRenewLeaseTimeout(); dfsclientsCopy = new ArrayList<>(dfsclients); - dfsclients.clear(); - //Expire the current LeaseRenewer thread. 
- emptyTime = 0; Factory.INSTANCE.remove(LeaseRenewer.this); } for (DFSClient dfsClient : dfsclientsCopy) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java index 1ffec85e02b8a..f1a11edeefcd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java @@ -31,7 +31,11 @@ import org.mockito.stubbing.Answer; import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.regex.Pattern; import static org.junit.Assert.assertSame; @@ -168,6 +172,11 @@ public Boolean get() { renewer.closeClient(mockClient1); renewer.closeClient(mockClient2); + renewer.closeClient(MOCK_DFSCLIENT); + + // Make sure renewer is not running due to expiration. + Thread.sleep(FAST_GRACE_PERIOD * 2); + Assert.assertTrue(!renewer.isRunning()); } @Test @@ -197,4 +206,82 @@ public void testThreadName() throws Exception { Assert.assertFalse(renewer.isRunning()); } + /** + * Test for HDFS-14575. In this fix, the LeaseRenewer clears all clients + * and expires immediately via setting empty time to 0 before it's removed + * from factory. Previously, LeaseRenewer#daemon thread might leak. + */ + @Test + public void testDaemonThreadLeak() throws Exception { + Assert.assertFalse("Renewer not initially running", renewer.isRunning()); + + // Pretend to create a file#1, daemon#1 starts + renewer.put(MOCK_DFSCLIENT); + Assert.assertTrue("Renewer should have started running", + renewer.isRunning()); + Pattern daemonThreadNamePattern = Pattern.compile("LeaseRenewer:\\S+"); + Assert.assertEquals(1, countThreadMatching(daemonThreadNamePattern)); + + // Pretend to create file#2, daemon#2 starts due to expiration + LeaseRenewer lastRenewer = renewer; + renewer = + LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT); + Assert.assertEquals(lastRenewer, renewer); + + // Pretend to close file#1 + renewer.closeClient(MOCK_DFSCLIENT); + Assert.assertEquals(1, countThreadMatching(daemonThreadNamePattern)); + + // Pretend to be expired + renewer.setEmptyTime(0); + + renewer = + LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT); + renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD); + boolean success = renewer.put(MOCK_DFSCLIENT); + if (!success) { + LeaseRenewer.remove(renewer); + renewer = + LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT); + renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD); + renewer.put(MOCK_DFSCLIENT); + } + + int threadCount = countThreadMatching(daemonThreadNamePattern); + //Sometimes old LR#Daemon gets closed and lead to count 1 (rare scenario) + Assert.assertTrue(1 == threadCount || 2 == threadCount); + + // After grace period, both daemon#1 and renewer#1 will be removed due to + // expiration, then daemon#2 will leak before HDFS-14575. 
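[Reviewer note, not part of the patch] The leak described in the comment above is what HDFS-14575 closes: LeaseRenewer#put now returns false when the instance can no longer host a daemon, and DFSClient#beginFileLease reacts by removing that renewer from the factory and registering with a freshly created one. A drastically simplified, hedged sketch of that retry contract; every class and method below is a stand-in, not the real Hadoop type:

    public class RenewerRetrySketch {
      // Stand-in for LeaseRenewer: once this instance has expired it will not
      // start another daemon, so put() reports failure instead of leaking a thread.
      static class Renewer {
        private boolean expired;
        void expire()         { expired = true; }
        boolean put(Object c) { return !expired; }
      }

      // Stand-in for LeaseRenewer.Factory, which the real code keys by
      // (authority, UGI); a single slot is enough to show the retry path.
      static class Factory {
        private Renewer current = new Renewer();
        synchronized Renewer get()          { return current; }
        synchronized void remove(Renewer r) { if (r == current) { current = new Renewer(); } }
      }

      // Mirrors the retry added to DFSClient#beginFileLease: when the cached
      // renewer refuses a new client, drop it and register with a fresh one.
      static void beginFileLease(Factory factory, Object client) {
        Renewer renewer = factory.get();
        if (!renewer.put(client)) {
          factory.remove(renewer);
          factory.get().put(client);
        }
      }

      public static void main(String[] args) {
        Factory factory = new Factory();
        beginFileLease(factory, new Object()); // renewer #1 accepts the client
        factory.get().expire();                // pretend the grace period elapsed
        beginFileLease(factory, new Object()); // #1 refuses, a fresh #2 takes over
        System.out.println("done");
      }
    }
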
+ Thread.sleep(FAST_GRACE_PERIOD * 2); + + // Pretend to close file#2, renewer#2 will be created + lastRenewer = renewer; + renewer = + LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT); + Assert.assertEquals(lastRenewer, renewer); + renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD); + renewer.closeClient(MOCK_DFSCLIENT); + renewer.setEmptyTime(0); + // Make sure LeaseRenewer#daemon threads will terminate after grace period + Thread.sleep(FAST_GRACE_PERIOD * 2); + Assert.assertEquals("LeaseRenewer#daemon thread leaks", 0, + countThreadMatching(daemonThreadNamePattern)); + } + + private static int countThreadMatching(Pattern pattern) { + ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); + ThreadInfo[] infos = + threadBean.getThreadInfo(threadBean.getAllThreadIds(), 1); + int count = 0; + for (ThreadInfo info : infos) { + if (info == null) { + continue; + } + if (pattern.matcher(info.getThreadName()).matches()) { + count++; + } + } + return count; + } } From f9996bb7b716e5c6b79bfe9e5cfb358e0d2b88dd Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 23 Jun 2021 08:51:34 +0530 Subject: [PATCH 434/666] HDFS-16082. Atomic operations on exceptionsSinceLastBalance and failedTimesSinceLastSuccessfulBalance in Balancer (#3127) --- .../hadoop/hdfs/server/balancer/Balancer.java | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 33650eac2a9af..73a8e681915fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -37,6 +37,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @@ -211,8 +212,10 @@ public class Balancer { @VisibleForTesting private static volatile boolean serviceRunning = false; - private static volatile int exceptionsSinceLastBalance = 0; - private static volatile int failedTimesSinceLastSuccessfulBalance = 0; + private static final AtomicInteger EXCEPTIONS_SINCE_LAST_BALANCE = + new AtomicInteger(0); + private static final AtomicInteger + FAILED_TIMES_SINCE_LAST_SUCCESSFUL_BALANCE = new AtomicInteger(0); private final Dispatcher dispatcher; private final NameNodeConnector nnc; @@ -274,11 +277,11 @@ static int getInt(Configuration conf, String key, int defaultValue) { } static int getExceptionsSinceLastBalance() { - return exceptionsSinceLastBalance; + return EXCEPTIONS_SINCE_LAST_BALANCE.get(); } static int getFailedTimesSinceLastSuccessfulBalance() { - return failedTimesSinceLastSuccessfulBalance; + return FAILED_TIMES_SINCE_LAST_SUCCESSFUL_BALANCE.get(); } /** @@ -866,20 +869,21 @@ static int run(Collection namenodes, Collection nsIds, int retCode = doBalance(namenodes, nsIds, p, conf); if (retCode < 0) { LOG.info("Balance failed, error code: " + retCode); - failedTimesSinceLastSuccessfulBalance++; + FAILED_TIMES_SINCE_LAST_SUCCESSFUL_BALANCE.incrementAndGet(); } else { LOG.info("Balance succeed!"); - failedTimesSinceLastSuccessfulBalance = 0; + FAILED_TIMES_SINCE_LAST_SUCCESSFUL_BALANCE.set(0); } - exceptionsSinceLastBalance = 0; + 
EXCEPTIONS_SINCE_LAST_BALANCE.set(0); } catch (Exception e) { - if (++exceptionsSinceLastBalance > retryOnException) { + if (EXCEPTIONS_SINCE_LAST_BALANCE.incrementAndGet() + > retryOnException) { // The caller will process and log the exception throw e; } LOG.warn( "Encounter exception while do balance work. Already tried {} times", - exceptionsSinceLastBalance, e); + EXCEPTIONS_SINCE_LAST_BALANCE, e); } // sleep for next round, will retry for next round when it's interrupted From b0cb50c8b6bcffc3ce051635c62193cb50a4e168 Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Wed, 23 Jun 2021 19:33:03 +0900 Subject: [PATCH 435/666] YARN-10826. [UI2] Upgrade Node.js to v12.22.1. (#3128) --- .../hadoop-yarn/hadoop-yarn-ui/pom.xml | 2 +- .../hadoop-yarn-ui/src/main/webapp/bower.json | 2 +- .../src/main/webapp/package.json | 3 ++ .../hadoop-yarn-ui/src/main/webapp/yarn.lock | 29 +------------------ 4 files changed, 6 insertions(+), 30 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml index cf3793e235cd2..d8be4c7667319 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml @@ -184,7 +184,7 @@ install-node-and-yarn - v10.23.1 + v12.22.1 v1.22.5 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json index 0ef1367c24d19..b843dd5f099c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower.json @@ -1,7 +1,7 @@ { "name": "yarn-ui", "dependencies": { - "ember": "2.2.0", + "ember": "2.11.3", "ember-cli-shims": "0.0.6", "ember-cli-test-loader": "0.2.1", "ember-data": "2.1.0", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json index 99d6c524cd4e5..91c5a0f3e671b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json @@ -58,5 +58,8 @@ "dependencies": { "em-helpers": "^0.8.0", "em-table": "0.12.0" + }, + "resolutions": { + "graceful-fs": "^4.2.4" } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index 16594389bd58e..4b055e6f1fd9b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -3215,33 +3215,11 @@ globule@^1.0.0: lodash "~4.17.10" minimatch "~3.0.2" -graceful-fs@^3.0.5: - version "3.0.12" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.12.tgz#0034947ce9ed695ec8ab0b854bc919e82b1ffaef" - integrity sha512-J55gaCS4iTTJfTXIxSVw3EMQckcqkpdRv3IR7gu6sq0+tbC363Zx6KH/SEwXASK9JRbhyZmVjJEVJIOxYsB3Qg== - dependencies: - natives "^1.1.3" - -graceful-fs@^4.1.11, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9: - version "4.2.3" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.3.tgz#4a12ff1b60376ef09862c2093edd908328be8423" - integrity sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ== - -graceful-fs@^4.1.2: +graceful-fs@^3.0.5, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.4, 
graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.4, graceful-fs@~2.0.0, graceful-fs@~4.1.2: version "4.2.6" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee" integrity sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ== -graceful-fs@~2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0" - integrity sha1-fNLNsiiko/Nule+mzBQt59GhNtA= - -graceful-fs@~4.1.2: - version "4.1.15" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00" - integrity sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA== - growl@^1.8.1: version "1.10.5" resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" @@ -4674,11 +4652,6 @@ nan@^2.13.2: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== -natives@^1.1.3: - version "1.1.6" - resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.6.tgz#a603b4a498ab77173612b9ea1acdec4d980f00bb" - integrity sha512-6+TDFewD4yxY14ptjKaS63GVdtKiES1pTPyxn9Jb0rBqPMZ7VcCiooEhPNsr+mqHtMGxa/5c/HhcC4uPEUw/nA== - negotiator@0.6.2: version "0.6.2" resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" From 7465205600b57a89cd202d5ef708e9f9f31f8013 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Wed, 23 Jun 2021 22:32:33 +0530 Subject: [PATCH 436/666] HADOOP-17766. CI for Debian 10 (#3129) --- dev-support/Jenkinsfile | 128 +++-------- dev-support/docker/Dockerfile_debian_10 | 4 +- dev-support/jenkins.sh | 212 ++++++++++++++++++ .../libhdfspp/lib/rpc/cyrus_sasl_engine.cc | 2 +- .../libhdfspp/tests/bad_datanode_test.cc | 1 - 5 files changed, 249 insertions(+), 98 deletions(-) create mode 100644 dev-support/jenkins.sh diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 528e2768c9b3a..37a350da6c385 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -32,7 +32,6 @@ pipeline { SOURCEDIR = 'src' // will also need to change notification section below PATCHDIR = 'out' - DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" YETUS='yetus' // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' YETUS_VERSION='f9ba0170a5787a5f4662d3769804fef0226a182f' @@ -57,7 +56,17 @@ pipeline { } } - stage ('precommit-run') { + // This is an optional stage which runs only when there's a change in + // C++/C++ build/platform. + // This stage serves as a means of cross platform validation, which is + // really needed to ensure that any C++ related/platform change doesn't + // break the Hadoop build on Debian 10. 
+ stage ('precommit-run Debian 10') { + environment { + DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile_debian_10" + IS_OPTIONAL = 1 + } + steps { withCredentials( [usernamePassword(credentialsId: 'apache-hadoop-at-github.com', @@ -68,100 +77,33 @@ pipeline { usernameVariable: 'JIRA_USER')]) { sh '''#!/usr/bin/env bash - set -e - - TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/test-patch.sh" - - # this must be clean for every run - if [[ -d "${WORKSPACE}/${PATCHDIR}" ]]; then - rm -rf "${WORKSPACE}/${PATCHDIR}" - fi - mkdir -p "${WORKSPACE}/${PATCHDIR}" - - # if given a JIRA issue, process it. If CHANGE_URL is set - # (e.g., Github Branch Source plugin), process it. - # otherwise exit, because we don't want Hadoop to do a - # full build. We wouldn't normally do this check for smaller - # projects. :) - if [[ -n "${JIRA_ISSUE_KEY}" ]]; then - YETUS_ARGS+=("${JIRA_ISSUE_KEY}") - elif [[ -z "${CHANGE_URL}" ]]; then - echo "Full build skipped" > "${WORKSPACE}/${PATCHDIR}/report.html" - exit 0 - fi - - YETUS_ARGS+=("--patch-dir=${WORKSPACE}/${PATCHDIR}") - - # where the source is located - YETUS_ARGS+=("--basedir=${WORKSPACE}/${SOURCEDIR}") - - # our project defaults come from a personality file - YETUS_ARGS+=("--project=hadoop") - YETUS_ARGS+=("--personality=${WORKSPACE}/${SOURCEDIR}/dev-support/bin/hadoop.sh") - - # lots of different output formats - YETUS_ARGS+=("--brief-report-file=${WORKSPACE}/${PATCHDIR}/brief.txt") - YETUS_ARGS+=("--console-report-file=${WORKSPACE}/${PATCHDIR}/console.txt") - YETUS_ARGS+=("--html-report-file=${WORKSPACE}/${PATCHDIR}/report.html") - - # enable writing back to Github - YETUS_ARGS+=(--github-token="${GITHUB_TOKEN}") - - # enable writing back to ASF JIRA - YETUS_ARGS+=(--jira-password="${JIRA_PASSWORD}") - YETUS_ARGS+=(--jira-user="${JIRA_USER}") - - # auto-kill any surefire stragglers during unit test runs - YETUS_ARGS+=("--reapermode=kill") - - # set relatively high limits for ASF machines - # changing these to higher values may cause problems - # with other jobs on systemd-enabled machines - YETUS_ARGS+=("--proclimit=5500") - YETUS_ARGS+=("--dockermemlimit=22g") - - # -1 spotbugs issues that show up prior to the patch being applied - YETUS_ARGS+=("--spotbugs-strict-precheck") - - # rsync these files back into the archive dir - YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,spotbugsXml.xml") - - # URL for user-side presentation in reports and such to our artifacts - # (needs to match the archive bits below) - YETUS_ARGS+=("--build-url-artifacts=artifact/out") - - # plugins to enable - YETUS_ARGS+=("--plugins=all") - - # don't let these tests cause -1s because we aren't really paying that - # much attention to them - YETUS_ARGS+=("--tests-filter=checkstyle") - - # run in docker mode and specifically point to our - # Dockerfile since we don't want to use the auto-pulled version. 
- YETUS_ARGS+=("--docker") - YETUS_ARGS+=("--dockerfile=${DOCKERFILE}") - YETUS_ARGS+=("--mvn-custom-repos") - - # effectively treat dev-suport as a custom maven module - YETUS_ARGS+=("--skip-dirs=dev-support") - - # help keep the ASF boxes clean - YETUS_ARGS+=("--sentinel") - - # test with Java 8 and 11 - YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64") - YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64") - YETUS_ARGS+=("--multijdktests=compile") + chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" + ''' + } + } + } - # custom javadoc goals - YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork") + // We want to use Ubuntu Focal as our main CI and thus, this stage + // isn't optional (runs for all the PRs). + stage ('precommit-run Ubuntu focal') { + environment { + DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" + IS_OPTIONAL = 0 + } - # write Yetus report as GitHub comment (YETUS-1102) - YETUS_ARGS+=("--github-write-comment") - YETUS_ARGS+=("--github-use-emoji-vote") + steps { + withCredentials( + [usernamePassword(credentialsId: 'apache-hadoop-at-github.com', + passwordVariable: 'GITHUB_TOKEN', + usernameVariable: 'GITHUB_USER'), + usernamePassword(credentialsId: 'hadoopqa-at-asf-jira', + passwordVariable: 'JIRA_PASSWORD', + usernameVariable: 'JIRA_USER')]) { + sh '''#!/usr/bin/env bash - "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" + chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" ''' } } diff --git a/dev-support/docker/Dockerfile_debian_10 b/dev-support/docker/Dockerfile_debian_10 index 8b7cbde7e624d..9601c3f35522a 100644 --- a/dev-support/docker/Dockerfile_debian_10 +++ b/dev-support/docker/Dockerfile_debian_10 @@ -54,9 +54,7 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -RUN locale-gen en_US.UTF-8 -ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8' -ENV PYTHONIOENCODING=utf-8 +# TODO : Set locale ###### # Set env vars required to build Hadoop diff --git a/dev-support/jenkins.sh b/dev-support/jenkins.sh new file mode 100644 index 0000000000000..09905dd320cb7 --- /dev/null +++ b/dev-support/jenkins.sh @@ -0,0 +1,212 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is called from the Jenkinsfile, which ultimately runs +# the CI through Yetus. +# We use Ubuntu Focal as the main platform for building Hadoop, thus +# it runs for all the PRs. Additionally, we also ensure that +# Hadoop builds across the supported platforms whenever there's a change +# in any of the C++ files, C++ build files or platform changes. 
+ +## @description Check if the given extension is related to C++ +## @param seeking +## @return 0 if yes +## @return 1 if no +is_cpp_extension() { + local cpp_extensions=("cc" "cpp" "h" "hpp") + local seeking=$1 + + for element in "${cpp_extensions[@]}"; do + if [[ $element == "$seeking" ]]; then + return 0 + fi + done + return 1 +} + +## @description Check if the given relative path corresponds to +## change in platform files +## @param in_path +## @return 0 if yes +## @return 1 if no +is_platform_change() { + declare in_path + in_path="${SOURCEDIR}"/"${1}" + + for path in "${SOURCEDIR}"/dev-support/docker/Dockerfile* "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do + if [ "${in_path}" == "${path}" ]; then + echo "Found C++ platform related changes in ${in_path}" + return 0 + fi + done + return 1 +} + +## @description Checks if the given path corresponds to a change +## in C++ files or related to C++ build system +## @param path +## @return 0 if yes +## @return 1 if no +is_cpp_change() { + shopt -s nocasematch + + local path=$1 + declare filename + filename=$(basename -- "${path}") + extension=${filename##*.} + + if is_cpp_extension "${extension}"; then + echo "Found C++ changes in ${path}" + return 0 + fi + + if [[ $filename =~ CMakeLists\.txt ]]; then + echo "Found C++ build related changes in ${path}" + return 0 + fi + return 1 +} + +## @description Check if the CI needs to be run - CI will always run if +## IS_OPTIONAL is 0, or if there's any change in +## C++/C++ build/platform +## @return 0 if yes +## @return 1 if no +function check_ci_run() { + # Get the first commit of this PR relative to the trunk branch + firstCommitOfThisPr=$(git --git-dir "${SOURCEDIR}/.git" rev-parse origin/trunk) + + # Loop over the paths of all the changed files and check if the criteria + # to run the CI has been satisfied + for path in $(git --git-dir "${SOURCEDIR}/.git" diff --name-only "${firstCommitOfThisPr}" HEAD); do + if is_cpp_change "${path}"; then + return 0 + fi + + if is_platform_change "${path}"; then + return 0 + fi + done + + # We must run the CI if it's not optional + if [ "$IS_OPTIONAL" -eq 0 ]; then + return 0 + fi + return 1 +} + +## @description Run the CI using YETUS +function run_ci() { + TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/test-patch.sh" + + # this must be clean for every run + if [[ -d "${WORKSPACE}/${PATCHDIR}" ]]; then + rm -rf "${WORKSPACE:?}/${PATCHDIR}" + fi + mkdir -p "${WORKSPACE}/${PATCHDIR}" + + # if given a JIRA issue, process it. If CHANGE_URL is set + # (e.g., Github Branch Source plugin), process it. + # otherwise exit, because we don't want Hadoop to do a + # full build. We wouldn't normally do this check for smaller + # projects. 
:) + if [[ -n "${JIRA_ISSUE_KEY}" ]]; then + YETUS_ARGS+=("${JIRA_ISSUE_KEY}") + elif [[ -z "${CHANGE_URL}" ]]; then + echo "Full build skipped" >"${WORKSPACE}/${PATCHDIR}/report.html" + exit 0 + fi + + YETUS_ARGS+=("--patch-dir=${WORKSPACE}/${PATCHDIR}") + + # where the source is located + YETUS_ARGS+=("--basedir=${WORKSPACE}/${SOURCEDIR}") + + # our project defaults come from a personality file + YETUS_ARGS+=("--project=hadoop") + YETUS_ARGS+=("--personality=${WORKSPACE}/${SOURCEDIR}/dev-support/bin/hadoop.sh") + + # lots of different output formats + YETUS_ARGS+=("--brief-report-file=${WORKSPACE}/${PATCHDIR}/brief.txt") + YETUS_ARGS+=("--console-report-file=${WORKSPACE}/${PATCHDIR}/console.txt") + YETUS_ARGS+=("--html-report-file=${WORKSPACE}/${PATCHDIR}/report.html") + + # enable writing back to Github + YETUS_ARGS+=("--github-token=${GITHUB_TOKEN}") + + # enable writing back to ASF JIRA + YETUS_ARGS+=("--jira-password=${JIRA_PASSWORD}") + YETUS_ARGS+=("--jira-user=${JIRA_USER}") + + # auto-kill any surefire stragglers during unit test runs + YETUS_ARGS+=("--reapermode=kill") + + # set relatively high limits for ASF machines + # changing these to higher values may cause problems + # with other jobs on systemd-enabled machines + YETUS_ARGS+=("--proclimit=5500") + YETUS_ARGS+=("--dockermemlimit=22g") + + # -1 spotbugs issues that show up prior to the patch being applied + YETUS_ARGS+=("--spotbugs-strict-precheck") + + # rsync these files back into the archive dir + YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,spotbugsXml.xml") + + # URL for user-side presentation in reports and such to our artifacts + # (needs to match the archive bits below) + YETUS_ARGS+=("--build-url-artifacts=artifact/out") + + # plugins to enable + YETUS_ARGS+=("--plugins=all") + + # don't let these tests cause -1s because we aren't really paying that + # much attention to them + YETUS_ARGS+=("--tests-filter=checkstyle") + + # run in docker mode and specifically point to our + # Dockerfile since we don't want to use the auto-pulled version. 
+ YETUS_ARGS+=("--docker") + YETUS_ARGS+=("--dockerfile=${DOCKERFILE}") + YETUS_ARGS+=("--mvn-custom-repos") + + # effectively treat dev-suport as a custom maven module + YETUS_ARGS+=("--skip-dirs=dev-support") + + # help keep the ASF boxes clean + YETUS_ARGS+=("--sentinel") + + # test with Java 8 and 11 + YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64") + YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64") + YETUS_ARGS+=("--multijdktests=compile") + + # custom javadoc goals + YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork") + + # write Yetus report as GitHub comment (YETUS-1102) + YETUS_ARGS+=("--github-write-comment") + YETUS_ARGS+=("--github-use-emoji-vote") + + "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" +} + +# Check if the CI needs to be run, if so, do so :) +if check_ci_run; then + run_ci +fi diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/cyrus_sasl_engine.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/cyrus_sasl_engine.cc index 5c96edea5a3b1..ddc8a6e1db62e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/cyrus_sasl_engine.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/cyrus_sasl_engine.cc @@ -28,7 +28,7 @@ #include "common/logging.h" -#include "sasl_engine.h" +#include "sasl_engine.h" #include "cyrus_sasl_engine.h" namespace hdfs { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc index 911f7cae28a90..973212647e764 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/bad_datanode_test.cc @@ -32,7 +32,6 @@ #include - using hadoop::common::TokenProto; using hadoop::hdfs::DatanodeInfoProto; using hadoop::hdfs::DatanodeIDProto; From 3129f61b22511dadfe4c6fd5fd765cb599a0bcfc Mon Sep 17 00:00:00 2001 From: litao Date: Thu, 24 Jun 2021 10:27:40 +0800 Subject: [PATCH 437/666] HDFS-16076. 
Avoid using slow DataNodes for reading by sorting locations (#3117) --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 + .../java/org/apache/hadoop/hdfs/DFSUtil.java | 46 +++- .../BlockPlacementPolicyDefault.java | 4 +- .../blockmanagement/DatanodeManager.java | 70 +++--- .../src/main/resources/hdfs-default.xml | 10 + ...TestReplicationPolicyExcludeSlowNodes.java | 8 +- .../blockmanagement/TestSortLocatedBlock.java | 200 ++++++++++++++++-- 7 files changed, 284 insertions(+), 58 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index bc371ea08ac9e..b9a7bc5eaec73 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -516,6 +516,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // Whether to enable datanode's stale state detection and usage for reads public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode"; public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false; + public static final String DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY = + "dfs.namenode.avoid.read.slow.datanode"; + public static final boolean + DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_DEFAULT = false; // Whether to enable datanode's stale state detection and usage for writes public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY = "dfs.namenode.avoid.write.stale.datanode"; public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 9f7e9d5d7c9dc..3e47e557b0bea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -156,23 +156,36 @@ public int compare(DatanodeInfo a, DatanodeInfo b) { /** * Comparator for sorting DataNodeInfo[] based on - * stale, decommissioned and entering_maintenance states. - * Order: live {@literal ->} stale {@literal ->} entering_maintenance - * {@literal ->} decommissioned + * slow, stale, entering_maintenance and decommissioned states. + * Order: live {@literal ->} slow {@literal ->} stale {@literal ->} + * entering_maintenance {@literal ->} decommissioned */ @InterfaceAudience.Private - public static class ServiceAndStaleComparator extends ServiceComparator { + public static class StaleAndSlowComparator extends ServiceComparator { + private final boolean avoidStaleDataNodesForRead; private final long staleInterval; + private final boolean avoidSlowDataNodesForRead; + private final Set slowNodesUuidSet; /** * Constructor of ServiceAndStaleComparator - * + * @param avoidStaleDataNodesForRead + * Whether or not to avoid using stale DataNodes for reading. * @param interval * The time interval for marking datanodes as stale is passed from - * outside, since the interval may be changed dynamically + * outside, since the interval may be changed dynamically. + * @param avoidSlowDataNodesForRead + * Whether or not to avoid using slow DataNodes for reading. + * @param slowNodesUuidSet + * Slow DataNodes UUID set. 
*/ - public ServiceAndStaleComparator(long interval) { + public StaleAndSlowComparator( + boolean avoidStaleDataNodesForRead, long interval, + boolean avoidSlowDataNodesForRead, Set slowNodesUuidSet) { + this.avoidStaleDataNodesForRead = avoidStaleDataNodesForRead; this.staleInterval = interval; + this.avoidSlowDataNodesForRead = avoidSlowDataNodesForRead; + this.slowNodesUuidSet = slowNodesUuidSet; } @Override @@ -183,9 +196,22 @@ public int compare(DatanodeInfo a, DatanodeInfo b) { } // Stale nodes will be moved behind the normal nodes - boolean aStale = a.isStale(staleInterval); - boolean bStale = b.isStale(staleInterval); - return aStale == bStale ? 0 : (aStale ? 1 : -1); + if (avoidStaleDataNodesForRead) { + boolean aStale = a.isStale(staleInterval); + boolean bStale = b.isStale(staleInterval); + ret = aStale == bStale ? 0 : (aStale ? 1 : -1); + if (ret != 0) { + return ret; + } + } + + // Slow nodes will be moved behind the normal nodes + if (avoidSlowDataNodesForRead) { + boolean aSlow = slowNodesUuidSet.contains(a.getDatanodeUuid()); + boolean bSlow = slowNodesUuidSet.contains(b.getDatanodeUuid()); + ret = aSlow == bSlow ? 0 : (aSlow ? 1 : -1); + } + return ret; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 9f68c36033cfc..e978089433a9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -1104,8 +1104,8 @@ boolean isGoodDatanode(DatanodeDescriptor node, // check if the target is a slow node if (dataNodePeerStatsEnabled && excludeSlowNodesEnabled) { - Set nodes = DatanodeManager.getSlowNodes(); - if (nodes.contains(node)) { + Set slowNodesUuidSet = DatanodeManager.getSlowNodesUuidSet(); + if (slowNodesUuidSet.contains(node.getDatanodeUuid())) { logNodeIsNotChosen(node, NodeNotChosenReason.NODE_SLOW); return false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 2e7b338d8578c..68ee16ca6f74b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -140,6 +140,9 @@ public class DatanodeManager { /** Whether or not to avoid using stale DataNodes for reading */ private final boolean avoidStaleDataNodesForRead; + /** Whether or not to avoid using slow DataNodes for reading. */ + private final boolean avoidSlowDataNodesForRead; + /** Whether or not to consider lad for reading. 
*/ private final boolean readConsiderLoad; @@ -210,7 +213,7 @@ public class DatanodeManager { @Nullable private final SlowPeerTracker slowPeerTracker; - private static Set slowNodesSet = Sets.newConcurrentHashSet(); + private static Set slowNodesUuidSet = Sets.newConcurrentHashSet(); private Daemon slowPeerCollectorDaemon; private final long slowPeerCollectionInterval; private final int maxSlowPeerReportNodes; @@ -242,7 +245,6 @@ public class DatanodeManager { } else { networktopology = NetworkTopology.getInstance(conf); } - this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf); this.datanodeAdminManager = new DatanodeAdminManager(namesystem, @@ -273,7 +275,6 @@ public class DatanodeManager { } this.slowDiskTracker = dataNodeDiskStatsEnabled ? new SlowDiskTracker(conf, timer) : null; - this.defaultXferPort = NetUtils.createSocketAddr( conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort(); @@ -294,11 +295,9 @@ public class DatanodeManager { } catch (IOException e) { LOG.error("error reading hosts files: ", e); } - this.dnsToSwitchMapping = ReflectionUtils.newInstance( conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, ScriptBasedMapping.class, DNSToSwitchMapping.class), conf); - this.rejectUnresolvedTopologyDN = conf.getBoolean( DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT); @@ -313,7 +312,6 @@ public class DatanodeManager { } dnsToSwitchMapping.resolve(locations); } - heartbeatIntervalSeconds = conf.getTimeDuration( DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS); @@ -322,7 +320,6 @@ public class DatanodeManager { DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds; - // Effected block invalidate limit is the bigger value between // value configured in hdfs-site.xml, and 20 * HB interval. 
final int configuredBlockInvalidateLimit = conf.getInt( @@ -335,16 +332,17 @@ public class DatanodeManager { + ": configured=" + configuredBlockInvalidateLimit + ", counted=" + countedBlockInvalidateLimit + ", effected=" + blockInvalidateLimit); - this.checkIpHostnameInRegistration = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY, DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT); LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY + "=" + checkIpHostnameInRegistration); - this.avoidStaleDataNodesForRead = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT); + this.avoidSlowDataNodesForRead = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, + DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_DEFAULT); this.readConsiderLoad = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY, DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT); @@ -389,7 +387,7 @@ private void startSlowPeerCollector() { public void run() { while (true) { try { - slowNodesSet = getSlowPeers(); + slowNodesUuidSet = getSlowPeersUuidSet(); } catch (Exception e) { LOG.error("Failed to collect slow peers", e); } @@ -509,12 +507,16 @@ private boolean isInactive(DatanodeInfo datanode) { return datanode.isDecommissioned() || datanode.isEnteringMaintenance() || (avoidStaleDataNodesForRead && datanode.isStale(staleInterval)); } + + private boolean isSlowNode(String dnUuid) { + return avoidSlowDataNodesForRead && slowNodesUuidSet.contains(dnUuid); + } /** * Sort the non-striped located blocks by the distance to the target host. * - * For striped blocks, it will only move decommissioned/stale nodes to the - * bottom. For example, assume we have storage list: + * For striped blocks, it will only move decommissioned/stale/slow + * nodes to the bottom. For example, assume we have storage list: * d0, d1, d2, d3, d4, d5, d6, d7, d8, d9 * mapping to block indices: * 0, 1, 2, 3, 4, 5, 6, 7, 8, 2 @@ -526,8 +528,11 @@ private boolean isInactive(DatanodeInfo datanode) { */ public void sortLocatedBlocks(final String targetHost, final List locatedBlocks) { - Comparator comparator = avoidStaleDataNodesForRead ? - new DFSUtil.ServiceAndStaleComparator(staleInterval) : + Comparator comparator = + avoidStaleDataNodesForRead || avoidSlowDataNodesForRead ? + new DFSUtil.StaleAndSlowComparator( + avoidStaleDataNodesForRead, staleInterval, + avoidSlowDataNodesForRead, slowNodesUuidSet) : new DFSUtil.ServiceComparator(); // sort located block for (LocatedBlock lb : locatedBlocks) { @@ -540,7 +545,8 @@ public void sortLocatedBlocks(final String targetHost, } /** - * Move decommissioned/stale datanodes to the bottom. After sorting it will + * Move decommissioned/entering_maintenance/stale/slow + * datanodes to the bottom. After sorting it will * update block indices and block tokens respectively. * * @param lb located striped block @@ -571,8 +577,9 @@ private void sortLocatedStripedBlock(final LocatedBlock lb, } /** - * Move decommissioned/entering_maintenance/stale datanodes to the bottom. - * Also, sort nodes by network distance. + * Move decommissioned/entering_maintenance/stale/slow + * datanodes to the bottom. Also, sort nodes by network + * distance. 
* * @param lb located block * @param targetHost target host @@ -602,12 +609,15 @@ private void sortLocatedBlock(final LocatedBlock lb, String targetHost, } DatanodeInfoWithStorage[] di = lb.getLocations(); - // Move decommissioned/entering_maintenance/stale datanodes to the bottom + // Move decommissioned/entering_maintenance/stale/slow + // datanodes to the bottom Arrays.sort(di, comparator); // Sort nodes by network distance only for located blocks int lastActiveIndex = di.length - 1; - while (lastActiveIndex > 0 && isInactive(di[lastActiveIndex])) { + while (lastActiveIndex > 0 && ( + isSlowNode(di[lastActiveIndex].getDatanodeUuid()) || + isInactive(di[lastActiveIndex]))) { --lastActiveIndex; } int activeLen = lastActiveIndex + 1; @@ -2083,10 +2093,10 @@ public String getSlowPeersReport() { * Returns all tracking slow peers. * @return */ - public Set getSlowPeers() { - Set slowPeersSet = Sets.newConcurrentHashSet(); + public Set getSlowPeersUuidSet() { + Set slowPeersUuidSet = Sets.newConcurrentHashSet(); if (slowPeerTracker == null) { - return slowPeersSet; + return slowPeersUuidSet; } ArrayList slowNodes = slowPeerTracker.getSlowNodes(maxSlowPeerReportNodes); @@ -2099,18 +2109,18 @@ public Set getSlowPeers() { DatanodeDescriptor datanodeByHost = host2DatanodeMap.getDatanodeByHost(ipAddr); if (datanodeByHost != null) { - slowPeersSet.add(datanodeByHost); + slowPeersUuidSet.add(datanodeByHost.getDatanodeUuid()); } } - return slowPeersSet; + return slowPeersUuidSet; } /** - * Returns all tracking slow peers. + * Returns all tracking slow datanodes uuids. * @return */ - public static Set getSlowNodes() { - return slowNodesSet; + public static Set getSlowNodesUuidSet() { + return slowNodesUuidSet; } /** @@ -2128,6 +2138,12 @@ public SlowPeerTracker getSlowPeerTracker() { public SlowDiskTracker getSlowDiskTracker() { return slowDiskTracker; } + + @VisibleForTesting + public void addSlowPeers(String dnUuid) { + slowNodesUuidSet.add(dnUuid); + } + /** * Retrieve information about slow disks as a JSON. * Returns null if we are not tracking slow disks. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index e540a677e0e24..78d8e033ec782 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2110,6 +2110,16 @@ + + dfs.namenode.avoid.read.slow.datanode + false + + Indicate whether or not to avoid reading from "slow" datanodes. + Slow datanodes will be moved to the end of the node list returned + for reading. 
+ + + dfs.namenode.avoid.write.stale.datanode false diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java index f40317d8e70db..f2c24a646b84a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.net.Node; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -100,12 +99,12 @@ public void testChooseTargetExcludeSlowNodes() throws Exception { Thread.sleep(3000); // fetch slow nodes - Set slowPeers = dnManager.getSlowPeers(); + Set slowPeers = dnManager.getSlowPeersUuidSet(); // assert slow nodes assertEquals(3, slowPeers.size()); for (int i = 0; i < slowPeers.size(); i++) { - assertTrue(slowPeers.contains(dataNodes[i])); + assertTrue(slowPeers.contains(dataNodes[i].getDatanodeUuid())); } // mock writer @@ -120,7 +119,8 @@ public void testChooseTargetExcludeSlowNodes() throws Exception { // assert targets assertEquals(3, targets.length); for (int i = 0; i < targets.length; i++) { - assertTrue(!slowPeers.contains(targets[i].getDatanodeDescriptor())); + assertTrue(!slowPeers.contains(targets[i].getDatanodeDescriptor() + .getDatanodeUuid())); } } finally { namenode.getNamesystem().writeUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java index 6b96fdf32bf54..45eedace2c40c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java @@ -27,34 +27,24 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.util.Time; -import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.Date; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * This class tests the sorting of located blocks based on * multiple states. */ public class TestSortLocatedBlock { - static final Logger LOG = LoggerFactory - .getLogger(TestSortLocatedBlock.class); - private static DatanodeManager dm; private static final long STALE_INTERVAL = 30 * 1000 * 60; - @BeforeClass - public static void setup() throws IOException { - dm = mockDatanodeManager(); - } - /** * Test to verify sorting with multiple state * datanodes exists in storage lists. @@ -73,8 +63,7 @@ public static void setup() throws IOException { * (d4 -> d3 -> d1 -> d2 -> d0). 
*/ @Test(timeout = 30000) - public void testWithMultipleStateDatanodes() { - LOG.info("Starting test testWithMultipleStateDatanodes"); + public void testWithStaleDatanodes() throws IOException { long blockID = Long.MIN_VALUE; int totalDns = 5; DatanodeInfo[] locs = new DatanodeInfo[totalDns]; @@ -106,6 +95,7 @@ public void testWithMultipleStateDatanodes() { 1024L, new Date().getTime()), locs)); // sort located blocks + DatanodeManager dm = mockDatanodeManager(true, false); dm.sortLocatedBlocks(null, locatedBlocks); // get locations after sorting @@ -114,6 +104,9 @@ public void testWithMultipleStateDatanodes() { // assert location order: // live -> stale -> entering_maintenance -> decommissioned + // (d4 -> d3 -> d1 -> d0 -> d2) + // or + // (d4 -> d3 -> d1 -> d2 -> d0). // live assertEquals(locs[4].getIpAddr(), locations[0].getIpAddr()); // stale @@ -126,11 +119,188 @@ public void testWithMultipleStateDatanodes() { && decommissionedNodes.contains(locations[4])); } - private static DatanodeManager mockDatanodeManager() throws IOException { + /** + * Test to verify sorting with multiple state + * datanodes exists in storage lists. + * + * After sorting the expected datanodes list will be: + * live -> slow -> stale -> staleAndSlow -> + * entering_maintenance -> decommissioned. + * + * avoidStaleDataNodesForRead=true && avoidSlowDataNodesForRead=true + * d5 -> d4 -> d3 -> d2 -> d1 -> d0 + */ + @Test(timeout = 30000) + public void testAviodStaleAndSlowDatanodes() throws IOException { + DatanodeManager dm = mockDatanodeManager(true, true); + DatanodeInfo[] locs = mockDatanodes(dm); + + ArrayList locatedBlocks = new ArrayList<>(); + locatedBlocks.add(new LocatedBlock( + new ExtendedBlock("pool", Long.MIN_VALUE, + 1024L, new Date().getTime()), locs)); + + // sort located blocks + dm.sortLocatedBlocks(null, locatedBlocks); + + // get locations after sorting + LocatedBlock locatedBlock = locatedBlocks.get(0); + DatanodeInfoWithStorage[] locations = locatedBlock.getLocations(); + + // assert location order: + // live -> stale -> entering_maintenance -> decommissioned + // live + assertEquals(locs[5].getIpAddr(), locations[0].getIpAddr()); + // slow + assertEquals(locs[4].getIpAddr(), locations[1].getIpAddr()); + // stale + assertEquals(locs[3].getIpAddr(), locations[2].getIpAddr()); + // stale and slow + assertEquals(locs[2].getIpAddr(), locations[3].getIpAddr()); + // entering_maintenance + assertEquals(locs[1].getIpAddr(), locations[4].getIpAddr()); + // decommissioned + assertEquals(locs[0].getIpAddr(), locations[5].getIpAddr()); + } + + /** + * Test to verify sorting with multiple state + * datanodes exists in storage lists. + * + * After sorting the expected datanodes list will be: + * (live <-> slow) -> (stale <-> staleAndSlow) -> + * entering_maintenance -> decommissioned. 
+ * + * avoidStaleDataNodesForRead=true && avoidSlowDataNodesForRead=false + * (d5 <-> d4) -> (d3 <-> d2) -> d1 -> d0 + */ + @Test(timeout = 30000) + public void testAviodStaleDatanodes() throws IOException { + DatanodeManager dm = mockDatanodeManager(true, false); + DatanodeInfo[] locs = mockDatanodes(dm); + + ArrayList locatedBlocks = new ArrayList<>(); + locatedBlocks.add(new LocatedBlock( + new ExtendedBlock("pool", Long.MIN_VALUE, + 1024L, new Date().getTime()), locs)); + + // sort located blocks + dm.sortLocatedBlocks(null, locatedBlocks); + + // get locations after sorting + LocatedBlock locatedBlock = locatedBlocks.get(0); + DatanodeInfoWithStorage[] locations = locatedBlock.getLocations(); + + // assert location order: + // live -> stale -> entering_maintenance -> decommissioned + // live + assertTrue((locs[5].getIpAddr() == locations[0].getIpAddr() && + locs[4].getIpAddr() == locations[1].getIpAddr()) || + (locs[5].getIpAddr() == locations[1].getIpAddr() && + locs[4].getIpAddr() == locations[0].getIpAddr())); + // stale + assertTrue((locs[3].getIpAddr() == locations[2].getIpAddr() && + locs[2].getIpAddr() == locations[3].getIpAddr()) || + (locs[3].getIpAddr() == locations[3].getIpAddr() && + locs[2].getIpAddr() == locations[2].getIpAddr())); + // entering_maintenance + assertEquals(locs[1].getIpAddr(), locations[4].getIpAddr()); + // decommissioned + assertEquals(locs[0].getIpAddr(), locations[5].getIpAddr()); + } + + /** + * Test to verify sorting with multiple state + * datanodes exists in storage lists. + * + * After sorting the expected datanodes list will be: + * (live <-> stale) -> (slow <-> staleAndSlow) -> + * entering_maintenance -> decommissioned. + * + * avoidStaleDataNodesForRead=false && avoidSlowDataNodesForRead=true + * (d5 -> d3) -> (d4 <-> d2) -> d1 -> d0 + */ + @Test(timeout = 30000) + public void testAviodSlowDatanodes() throws IOException { + DatanodeManager dm = mockDatanodeManager(false, true); + DatanodeInfo[] locs = mockDatanodes(dm); + + ArrayList locatedBlocks = new ArrayList<>(); + locatedBlocks.add(new LocatedBlock( + new ExtendedBlock("pool", Long.MIN_VALUE, + 1024L, new Date().getTime()), locs)); + + // sort located blocks + dm.sortLocatedBlocks(null, locatedBlocks); + + // get locations after sorting + LocatedBlock locatedBlock = locatedBlocks.get(0); + DatanodeInfoWithStorage[] locations = locatedBlock.getLocations(); + + // assert location order: + // live -> slow -> entering_maintenance -> decommissioned + // live + assertTrue((locs[5].getIpAddr() == locations[0].getIpAddr() && + locs[3].getIpAddr() == locations[1].getIpAddr()) || + (locs[5].getIpAddr() == locations[1].getIpAddr() && + locs[3].getIpAddr() == locations[0].getIpAddr())); + // slow + assertTrue((locs[4].getIpAddr() == locations[2].getIpAddr() && + locs[2].getIpAddr() == locations[3].getIpAddr()) || + (locs[4].getIpAddr() == locations[3].getIpAddr() && + locs[2].getIpAddr() == locations[2].getIpAddr())); + // entering_maintenance + assertEquals(locs[1].getIpAddr(), locations[4].getIpAddr()); + // decommissioned + assertEquals(locs[0].getIpAddr(), locations[5].getIpAddr()); + } + + /** + * We mock the following list of datanodes, and create LocatedBlock. 
+ * d0 - decommissioned + * d1 - entering_maintenance + * d2 - stale and slow + * d3 - stale + * d4 - slow + * d5 - live(in-service) + */ + private static DatanodeInfo[] mockDatanodes(DatanodeManager dm) { + int totalDns = 6; + DatanodeInfo[] locs = new DatanodeInfo[totalDns]; + + // create datanodes + for (int i = 0; i < totalDns; i++) { + String ip = i + "." + i + "." + i + "." + i; + locs[i] = DFSTestUtil.getDatanodeInfo(ip); + locs[i].setLastUpdateMonotonic(Time.monotonicNow()); + } + // set decommissioned state + locs[0].setDecommissioned(); + // set entering_maintenance state + locs[1].startMaintenance(); + // set stale and slow state + locs[2].setLastUpdateMonotonic(Time.monotonicNow() - + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT * 1000 - 1); + dm.addSlowPeers(locs[2].getDatanodeUuid()); + // set stale state + locs[3].setLastUpdateMonotonic(Time.monotonicNow() - + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT * 1000 - 1); + // set slow state + dm.addSlowPeers(locs[4].getDatanodeUuid()); + + return locs; + } + + private static DatanodeManager mockDatanodeManager( + boolean avoidStaleDNForRead, boolean avoidSlowDNForRead) + throws IOException { Configuration conf = new Configuration(); conf.setBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, - true); + avoidStaleDNForRead); + conf.setBoolean( + DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, + avoidSlowDNForRead); conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, STALE_INTERVAL); FSNamesystem fsn = Mockito.mock(FSNamesystem.class); From 64e4dfb213d696a8b89be23a7532da2b3e3013c3 Mon Sep 17 00:00:00 2001 From: litao Date: Thu, 24 Jun 2021 18:29:42 +0800 Subject: [PATCH 438/666] HDFS-16085. Move the getPermissionChecker out of the read lock (#3134). Contributed by tomscut. Signed-off-by: Takanobu Asanuma Signed-off-by: Ayush Saxena --- .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index a39648979c939..578c9ebb5a883 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -574,10 +574,11 @@ private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file) final FSNamesystem fsn = namenode.getNamesystem(); final String operationName = "fsckGetBlockLocations"; FSPermissionChecker.setOperationType(operationName); + FSPermissionChecker pc = fsn.getPermissionChecker(); fsn.readLock(); try { blocks = FSDirStatAndListingOp.getBlockLocations( - fsn.getFSDirectory(), fsn.getPermissionChecker(), + fsn.getFSDirectory(), pc, path, 0, fileLen, false) .blocks; } catch (FileNotFoundException fnfe) { From fb0a20b6fe201c20ad20310b8a061db9f860d070 Mon Sep 17 00:00:00 2001 From: Ahmed Hussein <50450311+amahussein@users.noreply.github.com> Date: Thu, 24 Jun 2021 07:27:52 -0500 Subject: [PATCH 439/666] HADOOP-17769. Upgrade JUnit to 4.13.2. fixes TestBlockRecovery (#3130). Contributed by Ahmed Hussein. 
Signed-off-by: Ayush Saxena --- LICENSE-binary | 2 +- hadoop-project/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LICENSE-binary b/LICENSE-binary index 216fbab1d425b..5ccc13cf6e31b 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -502,7 +502,7 @@ javax.xml.bind:jaxb-api:2.2.11 Eclipse Public License 1.0 -------------------------- -junit:junit:4.12 +junit:junit:4.13.2 HSQL License diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 665370436bed4..68e61143b3fda 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -198,7 +198,7 @@ 1.26 1.4.8 2.0.2 - 4.13.1 + 4.13.2 5.5.1 5.5.1 1.5.1 From 38d276fe9bf9665e013f18de831895bd0909716e Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Thu, 24 Jun 2021 16:37:27 +0100 Subject: [PATCH 440/666] HADOOP-17771. S3AFS creation fails "Unable to find a region via the region provider chain." (#3133) This addresses the regression in Hadoop 3.3.1 where if no S3 endpoint is set in fs.s3a.endpoint, S3A filesystem creation may fail on non-EC2 deployments, depending on the local host environment setup. * If fs.s3a.endpoint is empty/null, and fs.s3a.endpoint.region is null, the region is set to "us-east-1". * If fs.s3a.endpoint.region is explicitly set to "" then the client falls back to the SDK region resolution chain; this works on EC2 * Details in troubleshooting.md, including a workaround for Hadoop-3.3.1+ * Also contains some minor restructuring of troubleshooting.md Contributed by Steve Loughran. --- .../org/apache/hadoop/fs/s3a/Constants.java | 6 + .../hadoop/fs/s3a/DefaultS3ClientFactory.java | 46 +- .../hadoop/fs/s3a/impl/InternalConstants.java | 5 + .../site/markdown/tools/hadoop-aws/index.md | 12 +- .../tools/hadoop-aws/troubleshooting_s3a.md | 622 ++++++++++-------- .../hadoop/fs/s3a/ITestS3AEndpointRegion.java | 99 +++ 6 files changed, 526 insertions(+), 264 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 8dc6bba1ccef5..b741bc2301744 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -1087,4 +1087,10 @@ private Constants() { */ public static final String AWS_REGION = "fs.s3a.endpoint.region"; + /** + * The special S3 region which can be used to talk to any bucket. + * Value {@value}. 
+ */ + public static final String AWS_S3_CENTRAL_REGION = "us-east-1"; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java index 6e84497cb99d0..7dc920ce50c1d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java @@ -22,6 +22,7 @@ import java.net.URI; import com.amazonaws.ClientConfiguration; +import com.amazonaws.SdkClientException; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.handlers.RequestHandler2; import com.amazonaws.services.s3.AmazonS3; @@ -41,10 +42,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector; +import org.apache.hadoop.fs.store.LogExactlyOnce; import static org.apache.hadoop.fs.s3a.Constants.AWS_REGION; +import static org.apache.hadoop.fs.s3a.Constants.AWS_S3_CENTRAL_REGION; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT; +import static org.apache.hadoop.fs.s3a.S3AUtils.translateException; /** * The default {@link S3ClientFactory} implementation. @@ -64,6 +68,19 @@ public class DefaultS3ClientFactory extends Configured protected static final Logger LOG = LoggerFactory.getLogger(DefaultS3ClientFactory.class); + /** + * A one-off warning of default region chains in use. + */ + private static final LogExactlyOnce WARN_OF_DEFAULT_REGION_CHAIN = + new LogExactlyOnce(LOG); + + /** + * Warning message printed when the SDK Region chain is in use. + */ + private static final String SDK_REGION_CHAIN_IN_USE = + "S3A filesystem client is using" + + " the SDK region resolution chain."; + /** * Create the client by preparing the AwsConf configuration * and then invoking {@code buildAmazonS3Client()}. @@ -94,9 +111,14 @@ public AmazonS3 createS3Client( awsConf.setUserAgentSuffix(parameters.getUserAgentSuffix()); } - return buildAmazonS3Client( - awsConf, - parameters); + try { + return buildAmazonS3Client( + awsConf, + parameters); + } catch (SdkClientException e) { + // SDK refused to build. + throw translateException("creating AWS S3 client", uri.toString(), e); + } } /** @@ -109,6 +131,7 @@ public AmazonS3 createS3Client( * @param awsConf AWS configuration * @param parameters parameters * @return new AmazonS3 client + * @throws SdkClientException if the configuration is invalid. */ protected AmazonS3 buildAmazonS3Client( final ClientConfiguration awsConf, @@ -141,6 +164,21 @@ protected AmazonS3 buildAmazonS3Client( // no idea what the endpoint is, so tell the SDK // to work it out at the cost of an extra HEAD request b.withForceGlobalBucketAccessEnabled(true); + // HADOOP-17771 force set the region so the build process doesn't halt. + String region = getConf().getTrimmed(AWS_REGION, AWS_S3_CENTRAL_REGION); + LOG.debug("fs.s3a.endpoint.region=\"{}\"", region); + if (!region.isEmpty()) { + // there's either an explicit region or we have fallen back + // to the central one. + LOG.debug("Using default endpoint; setting region to {}", region); + b.setRegion(region); + } else { + // no region. + // allow this if people really want it; it is OK to rely on this + // when deployed in EC2. 
+ WARN_OF_DEFAULT_REGION_CHAIN.warn(SDK_REGION_CHAIN_IN_USE); + LOG.debug(SDK_REGION_CHAIN_IN_USE); + } } final AmazonS3 client = b.build(); return client; @@ -206,7 +244,7 @@ protected static AmazonS3 configureAmazonS3Client(AmazonS3 s3, createEndpointConfiguration( final String endpoint, final ClientConfiguration awsConf, String awsRegion) { - LOG.debug("Creating endpoint configuration for {}", endpoint); + LOG.debug("Creating endpoint configuration for \"{}\"", endpoint); if (endpoint == null || endpoint.isEmpty()) { // the default endpoint...we should be using null at this point. LOG.debug("Using default endpoint -no need to generate a configuration"); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java index d6142f49c94a2..cf962b87a4176 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java @@ -121,4 +121,9 @@ private InternalConstants() { */ public static final int DEFAULT_UPLOAD_PART_COUNT_LIMIT = 10000; + /** + * The system property used by the AWS SDK to identify the region. + */ + public static final String AWS_REGION_SYSPROP = "aws.region"; + } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index aff7694c52aa7..48b99caf651bf 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -438,6 +438,12 @@ you'll need to remove the `profile` prefix from the AWS configuration section he aws_session_token = ... aws_security_token = ... ``` +Note: + +1. The `region` setting is only used if `fs.s3a.endpoint.region` is set to the empty string. +1. For the credentials to be available to applications running in a Hadoop cluster, the + configuration files MUST be in the `~/.aws/` directory on the local filesystem in + all hosts in the cluster. ### Using Session Credentials with `TemporaryAWSCredentialsProvider` @@ -802,8 +808,10 @@ options are covered in [Testing](./testing.md). fs.s3a.endpoint.region AWS S3 region for a bucket, which bypasses the parsing of - fs.s3a.endpoint to know the region. Would be helpful in avoiding errors - while using privateLink URL and explicitly set the bucket region. + fs.s3a.endpoint to know the region. Would be helpful in avoiding errors + while using privateLink URL and explicitly set the bucket region. + If set to a blank string (or 1+ space), falls back to the + (potentially brittle) SDK region resolution process. diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index d91607d3fd19d..30047edfb5c38 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -18,11 +18,17 @@ ## Introduction -Common problems working with S3 are +Common problems working with S3 are: -1. Classpath setup -1. Authentication -1. Incorrect configuration +1. [Classpath setup](#classpath) +1. [Authentication](#authentication) +1. [Access Denial](#access_denied) +1. [Connectivity Problems](#connectivity) +1. [File System Semantics](#semantics) +1. 
[Encryption](#encryption) +1. [Other Errors](#other) + +This document also includes some [best pactises](#best) to aid troubleshooting. Troubleshooting IAM Assumed Roles is covered in its @@ -572,7 +578,7 @@ S3 sts endpoint and region like the following: ## Connectivity Problems -### Error message "The bucket you are attempting to access must be addressed using the specified endpoint" +### Error "The bucket you are attempting to access must be addressed using the specified endpoint" This surfaces when `fs.s3a.endpoint` is configured to use an S3 service endpoint which is neither the original AWS one, `s3.amazonaws.com` , nor the one where @@ -611,6 +617,101 @@ can be used: Using the explicit endpoint for the region is recommended for speed and to use the V4 signing API. +### `Unable to find a region via the region provider chain` + +S3A client creation fails, possibly after a pause of some seconds. + +This failure surfaces when _all_ the following conditions are met: + +1. Deployment outside EC2. +1. `fs.s3a.endpoint` is unset. +1. `fs.s3a.endpoint.region` is set to `""`. (Hadoop 3.3.2+ only) +1. Without the file `~/.aws/config` existing or without a region set in it. +1. Without the JVM system property `aws.region` declaring a region. +1. Without the environment variable `AWS_REGION` declaring a region. + +Stack trace (Hadoop 3.3.1): +``` +Caused by: com.amazonaws.SdkClientException: Unable to find a region via the region provider chain. + Must provide an explicit region in the builder or setup environment to supply a region. + at com.amazonaws.client.builder.AwsClientBuilder.setRegion(AwsClientBuilder.java:462) + at com.amazonaws.client.builder.AwsClientBuilder.configureMutableProperties(AwsClientBuilder.java:424) + at com.amazonaws.client.builder.AwsSyncClientBuilder.build(AwsSyncClientBuilder.java:46) + at org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.buildAmazonS3Client(DefaultS3ClientFactory.java:145) + at org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.createS3Client(DefaultS3ClientFactory.java:97) + at org.apache.hadoop.fs.s3a.S3AFileSystem.bindAWSClient(S3AFileSystem.java:788) + at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:478) +``` + +Log and stack trace on later releases, with +"S3A filesystem client is using the SDK region resolution chain." +warning that the SDK resolution chain is in use: + +``` +2021-06-23 19:56:55,971 [main] WARN s3a.DefaultS3ClientFactory (LogExactlyOnce.java:warn(39)) - + S3A filesystem client is using the SDK region resolution chain. + +2021-06-23 19:56:56,073 [main] WARN fs.FileSystem (FileSystem.java:createFileSystem(3464)) - + Failed to initialize fileystem s3a://osm-pds/planet: + org.apache.hadoop.fs.s3a.AWSClientIOException: creating AWS S3 client on s3a://osm-pds: + com.amazonaws.SdkClientException: Unable to find a region via the region provider chain. + Must provide an explicit region in the builder or setup environment to supply a region.: + Unable to find a region via the region provider chain. + Must provide an explicit region in the builder or setup environment to supply a region. 
+ at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:208) + at org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.createS3Client(DefaultS3ClientFactory.java:122) + at org.apache.hadoop.fs.s3a.S3AFileSystem.bindAWSClient(S3AFileSystem.java:788) + at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:478) + at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3460) + at org.apache.hadoop.fs.FileSystem.access$300(FileSystem.java:172) + at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3565) + at org.apache.hadoop.fs.FileSystem$Cache.getUnique(FileSystem.java:3518) + at org.apache.hadoop.fs.FileSystem.newInstance(FileSystem.java:592) +Caused by: com.amazonaws.SdkClientException: Unable to find a region via the region provider chain. + Must provide an explicit region in the builder or setup environment to supply a region. + at com.amazonaws.client.builder.AwsClientBuilder.setRegion(AwsClientBuilder.java:462) + at com.amazonaws.client.builder.AwsClientBuilder.configureMutableProperties(AwsClientBuilder.java:424) + at com.amazonaws.client.builder.AwsSyncClientBuilder.build(AwsSyncClientBuilder.java:46) + at org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.buildAmazonS3Client(DefaultS3ClientFactory.java:185) + at org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.createS3Client(DefaultS3ClientFactory.java:117) + ... 21 more +``` + +Due to changes in S3 client construction in Hadoop 3.3.1 this option surfaces in +non-EC2 deployments where no AWS endpoint was declared: +[HADOOP-17771](https://issues.apache.org/jira/browse/HADOOP-17771). On Hadoop +3.3.2 and later it takes active effort to create this stack trace. + +**Fix: set `fs.s3a.endpoint` to `s3.amazonaws.com`** + +Set `fs.s3a.endpoint` to the endpoint where the data is stored +(best), or to `s3.amazonaws.com` (second-best). + +```xml + + fs.s3a.endpoint + s3.amazonaws.com + +``` + +For Apache Spark, this can be done in `spark-defaults.conf` + +``` +spark.hadoop.fs.s3a.endpoint s3.amazonaws.com +``` + +Or in Scala by editing the spark configuration during setup. + +```scala +sc.hadoopConfiguration.set("fs.s3a.endpoint", "s3.amazonaws.com") +``` + +Tip: set the logging of `org.apache.hadoop.fs.s3a.DefaultS3ClientFactory` +to `DEBUG` to see how the endpoint and region configuration is determined. + +``` +log4j.logger.org.apache.hadoop.fs.s3a.DefaultS3ClientFactory=DEBUG +``` ### "Timeout waiting for connection from pool" when writing data @@ -792,257 +893,10 @@ Again, we believe this is caused by the connection to S3 being broken. It may go away if the operation is retried. -## Other Errors - -### `SdkClientException` Unable to verify integrity of data upload - -Something has happened to the data as it was uploaded. - -``` -Caused by: org.apache.hadoop.fs.s3a.AWSClientIOException: saving output on dest/_task_tmp.-ext-10000/_tmp.000000_0: - com.amazonaws.AmazonClientException: Unable to verify integrity of data upload. - Client calculated content hash (contentMD5: L75PalQk0CIhTp04MStVOA== in base 64) - didn't match hash (etag: 37ace01f2c383d6b9b3490933c83bb0f in hex) calculated by Amazon S3. - You may need to delete the data stored in Amazon S3. 
- (metadata.contentMD5: L75PalQk0CIhTp04MStVOA==, md5DigestStream: null, - bucketName: ext2, key: dest/_task_tmp.-ext-10000/_tmp.000000_0): - at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:144) - at org.apache.hadoop.fs.s3a.S3AOutputStream.close(S3AOutputStream.java:121) - at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72) - at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) - at org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat$1.close(HiveIgnoreKeyTextOutputFormat.java:99) - at org.apache.hadoop.hive.ql.exec.FileSinkOperator$FSPaths.closeWriters(FileSinkOperator.java:190) - ... 22 more -Caused by: com.amazonaws.AmazonClientException: Unable to verify integrity of data upload. - Client calculated content hash (contentMD5: L75PalQk0CIhTp04MStVOA== in base 64) - didn't match hash (etag: 37ace01f2c383d6b9b3490933c83bb0f in hex) calculated by Amazon S3. - You may need to delete the data stored in Amazon S3. - (metadata.contentMD5: L75PalQk0CIhTp04MStVOA==, md5DigestStream: null, - bucketName: ext2, key: dest/_task_tmp.-ext-10000/_tmp.000000_0) - at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1492) - at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:131) - at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:123) - at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:139) - at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:47) - ... 4 more -``` - -As it uploads data to S3, the AWS SDK builds up an MD5 checksum of what was -PUT/POSTed. When S3 returns the checksum of the uploaded data, that is compared -with the local checksum. If there is a mismatch, this error is reported. - -The uploaded data is already on S3 and will stay there, though if this happens -during a multipart upload, it may not be visible (but still billed: clean up your -multipart uploads via the `hadoop s3guard uploads` command). - -Possible causes for this - -1. A (possibly transient) network problem, including hardware faults. -1. A proxy server is doing bad things to the data. -1. Some signing problem, especially with third-party S3-compatible object stores. - -This is a very, very rare occurrence. - -If the problem is a signing one, try changing the signature algorithm. - -```xml - - fs.s3a.signing-algorithm - S3SignerType - -``` - -We cannot make any promises that it will work, -only that it has been known to make the problem go away "once" - -### `AWSS3IOException` The Content-MD5 you specified did not match what we received - -Reads work, but writes, even `mkdir`, fail: - -``` -org.apache.hadoop.fs.s3a.AWSS3IOException: copyFromLocalFile(file:/tmp/hello.txt, s3a://bucket/hello.txt) - on file:/tmp/hello.txt: - The Content-MD5 you specified did not match what we received. 
- (Service: Amazon S3; Status Code: 400; Error Code: BadDigest; Request ID: 4018131225), - S3 Extended Request ID: null - at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:127) - at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:69) - at org.apache.hadoop.fs.s3a.S3AFileSystem.copyFromLocalFile(S3AFileSystem.java:1494) - at org.apache.hadoop.tools.cloudup.Cloudup.uploadOneFile(Cloudup.java:466) - at org.apache.hadoop.tools.cloudup.Cloudup.access$000(Cloudup.java:63) - at org.apache.hadoop.tools.cloudup.Cloudup$1.call(Cloudup.java:353) - at org.apache.hadoop.tools.cloudup.Cloudup$1.call(Cloudup.java:350) - at java.util.concurrent.FutureTask.run(FutureTask.java:266) - at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) - at java.util.concurrent.FutureTask.run(FutureTask.java:266) - at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) - at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) - at java.lang.Thread.run(Thread.java:748) -Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: - The Content-MD5 you specified did not match what we received. - (Service: Amazon S3; Status Code: 400; Error Code: BadDigest; Request ID: 4018131225), - S3 Extended Request ID: null - at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1307) - at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:894) - at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:597) - at com.amazonaws.http.AmazonHttpClient.doExecute(AmazonHttpClient.java:363) - at com.amazonaws.http.AmazonHttpClient.executeWithTimer(AmazonHttpClient.java:329) - at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:308) - at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3659) - at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1422) - at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:131) - at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:123) - at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:139) - at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:47) - at org.apache.hadoop.fs.s3a.BlockingThreadPoolExecutorService$CallableWithPermitRelease.call(BlockingThreadPoolExecutorService.java:239) - ... 4 more -``` - -This stack trace was seen when interacting with a third-party S3 store whose -expectations of headers related to the AWS V4 signing mechanism was not -compatible with that of the specific AWS SDK Hadoop was using. - -Workaround: revert to V2 signing. - -```xml - - fs.s3a.signing-algorithm - S3SignerType - -``` - -### When writing data: "java.io.FileNotFoundException: Completing multi-part upload" - - -A multipart upload was trying to complete, but failed as there was no upload -with that ID. - -``` -java.io.FileNotFoundException: Completing multi-part upload on fork-5/test/multipart/1c397ca6-9dfb-4ac1-9cf7-db666673246b: - com.amazonaws.services.s3.model.AmazonS3Exception: The specified upload does not exist. - The upload ID may be invalid, or the upload may have been aborted or completed. 
- (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload; - at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1182) - at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:770) - at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489) - at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310) - at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785) - at com.amazonaws.services.s3.AmazonS3Client.completeMultipartUpload(AmazonS3Client.java:2705) - at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.complete(S3ABlockOutputStream.java:473) - at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.access$200(S3ABlockOutputStream.java:382) - at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.close(S3ABlockOutputStream.java:272) - at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72) - at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) -``` - -This can happen when all outstanding uploads have been aborted, including -the active ones. - -If the bucket has a lifecycle policy of deleting multipart uploads, make -sure that the expiry time of the deletion is greater than that required -for all open writes to complete the write, -*and for all jobs using the S3A committers to commit their work.* - - -### Application hangs after reading a number of files - - -The pool of https client connections and/or IO threads have been used up, -and none are being freed. - - -1. The pools aren't big enough. See ["Timeout waiting for connection from pool"](#timeout_from_pool) -2. Likely root cause: whatever code is reading files isn't calling `close()` -on the input streams. Make sure your code does this! -And if it's someone else's: make sure you have a recent version; search their -issue trackers to see if its a known/fixed problem. -If not, it's time to work with the developers, or come up with a workaround -(i.e closing the input stream yourself). 
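For illustration only, a minimal sketch of that workaround: reading an S3A object inside try-with-resources so the input stream is always closed, even on failure. The bucket and object path are placeholders, not anything referenced by these patches.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadAndClose {
  public static void main(String[] args) throws Exception {
    // Placeholder bucket/object, purely for illustration.
    Path source = new Path("s3a://example-bucket/data/sample.txt");
    FileSystem fs = source.getFileSystem(new Configuration());
    // try-with-resources guarantees close() is called whatever happens,
    // releasing the underlying HTTP connection back to the pool.
    try (FSDataInputStream in = fs.open(source)) {
      byte[] buffer = new byte[8192];
      int bytesRead;
      while ((bytesRead = in.read(buffer)) > 0) {
        System.out.write(buffer, 0, bytesRead);
      }
    }
  }
}
```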
- - - -### Issue: when writing data, HTTP Exceptions logged at info from `AmazonHttpClient` - -``` -[s3a-transfer-shared-pool4-t6] INFO http.AmazonHttpClient (AmazonHttpClient.java:executeHelper(496)) - - Unable to execute HTTP request: hwdev-steve-ireland-new.s3.amazonaws.com:443 failed to respond -org.apache.http.NoHttpResponseException: bucket.s3.amazonaws.com:443 failed to respond - at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:143) - at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:57) - at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:261) - at org.apache.http.impl.AbstractHttpClientConnection.receiveResponseHeader(AbstractHttpClientConnection.java:283) - at org.apache.http.impl.conn.DefaultClientConnection.receiveResponseHeader(DefaultClientConnection.java:259) - at org.apache.http.impl.conn.ManagedClientConnectionImpl.receiveResponseHeader(ManagedClientConnectionImpl.java:209) - at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:272) - at com.amazonaws.http.protocol.SdkHttpRequestExecutor.doReceiveResponse(SdkHttpRequestExecutor.java:66) - at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:124) - at org.apache.http.impl.client.DefaultRequestDirector.tryExecute(DefaultRequestDirector.java:686) - at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:488) - at org.apache.http.impl.client.AbstractHttpClient.doExecute(AbstractHttpClient.java:884) - at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) - at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:55) - at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:728) - at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489) - at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310) - at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785) - at com.amazonaws.services.s3.AmazonS3Client.copyPart(AmazonS3Client.java:1731) - at com.amazonaws.services.s3.transfer.internal.CopyPartCallable.call(CopyPartCallable.java:41) - at com.amazonaws.services.s3.transfer.internal.CopyPartCallable.call(CopyPartCallable.java:28) - at org.apache.hadoop.fs.s3a.BlockingThreadPoolExecutorService$CallableWithPermitRelease.call(BlockingThreadPoolExecutorService.java:239) - at java.util.concurrent.FutureTask.run(FutureTask.java:266) - at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) - at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) - at java.lang.Thread.run(Thread.java:745) -``` - -These are HTTP I/O exceptions caught and logged inside the AWS SDK. The client -will attempt to retry the operation; it may just be a transient event. If there -are many such exceptions in logs, it may be a symptom of connectivity or network -problems. - -### `AWSBadRequestException` IllegalLocationConstraintException/The unspecified location constraint is incompatible - -``` - Cause: org.apache.hadoop.fs.s3a.AWSBadRequestException: put on : - com.amazonaws.services.s3.model.AmazonS3Exception: - The unspecified location constraint is incompatible for the region specific - endpoint this request was sent to. 
- (Service: Amazon S3; Status Code: 400; Error Code: IllegalLocationConstraintException; - - at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:178) - at org.apache.hadoop.fs.s3a.S3ALambda.execute(S3ALambda.java:64) - at org.apache.hadoop.fs.s3a.WriteOperationHelper.uploadObject(WriteOperationHelper.java:451) - at org.apache.hadoop.fs.s3a.commit.magic.MagicCommitTracker.aboutToComplete(MagicCommitTracker.java:128) - at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.close(S3ABlockOutputStream.java:373) - at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72) - at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101) - at org.apache.hadoop.hive.ql.io.orc.WriterImpl.close(WriterImpl.java:2429) - at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:106) - at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:91) - ... - Cause: com.amazonaws.services.s3.model.AmazonS3Exception: - The unspecified location constraint is incompatible for the region specific endpoint - this request was sent to. (Service: Amazon S3; Status Code: 400; Error Code: IllegalLocationConstraintException; - Request ID: EEBC5A08BCB3A645) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1588) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1258) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699) - at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667) - at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649) - at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513) - at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221) - ... -``` - -Something has been trying to write data to "/". - -## File System Semantics +## File System Semantics These are the issues where S3 does not appear to behave the way a filesystem -"should". +"should". That's because it "isn't". ### File not visible/saved @@ -1185,7 +1039,7 @@ We also recommend using applications/application options which do not rename files when committing work or when copying data to S3, but instead write directly to the final destination. -## Rename not behaving as "expected" +### Rename not behaving as "expected" S3 is not a filesystem. The S3A connector mimics file and directory rename by @@ -1303,7 +1157,7 @@ is used, no encryption is specified, or the SSE-C specified is incorrect. 2. A directory is encrypted with a SSE-C keyA and the user is trying to move a file using configured SSE-C keyB into that structure. 
-## Message appears in logs "Not all bytes were read from the S3ObjectInputStream" +### Message appears in logs "Not all bytes were read from the S3ObjectInputStream" This is a message which can be generated by the Amazon SDK when the client application @@ -1378,8 +1232,250 @@ The specified bucket does not exist at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1367) ``` +## Other Errors + +### `SdkClientException` Unable to verify integrity of data upload + +Something has happened to the data as it was uploaded. + +``` +Caused by: org.apache.hadoop.fs.s3a.AWSClientIOException: saving output on dest/_task_tmp.-ext-10000/_tmp.000000_0: + com.amazonaws.AmazonClientException: Unable to verify integrity of data upload. + Client calculated content hash (contentMD5: L75PalQk0CIhTp04MStVOA== in base 64) + didn't match hash (etag: 37ace01f2c383d6b9b3490933c83bb0f in hex) calculated by Amazon S3. + You may need to delete the data stored in Amazon S3. + (metadata.contentMD5: L75PalQk0CIhTp04MStVOA==, md5DigestStream: null, + bucketName: ext2, key: dest/_task_tmp.-ext-10000/_tmp.000000_0): + at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:144) + at org.apache.hadoop.fs.s3a.S3AOutputStream.close(S3AOutputStream.java:121) + at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72) + at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) + at org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat$1.close(HiveIgnoreKeyTextOutputFormat.java:99) + at org.apache.hadoop.hive.ql.exec.FileSinkOperator$FSPaths.closeWriters(FileSinkOperator.java:190) + ... 22 more +Caused by: com.amazonaws.AmazonClientException: Unable to verify integrity of data upload. + Client calculated content hash (contentMD5: L75PalQk0CIhTp04MStVOA== in base 64) + didn't match hash (etag: 37ace01f2c383d6b9b3490933c83bb0f in hex) calculated by Amazon S3. + You may need to delete the data stored in Amazon S3. + (metadata.contentMD5: L75PalQk0CIhTp04MStVOA==, md5DigestStream: null, + bucketName: ext2, key: dest/_task_tmp.-ext-10000/_tmp.000000_0) + at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1492) + at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:131) + at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:123) + at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:139) + at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:47) + ... 4 more +``` + +As it uploads data to S3, the AWS SDK builds up an MD5 checksum of what was +PUT/POSTed. When S3 returns the checksum of the uploaded data, that is compared +with the local checksum. If there is a mismatch, this error is reported. + +The uploaded data is already on S3 and will stay there, though if this happens +during a multipart upload, it may not be visible (but still billed: clean up +your multipart uploads via the `hadoop s3guard uploads` command). + +Possible causes for this + +1. A (possibly transient) network problem, including hardware faults. +1. A proxy server is doing bad things to the data. +1. Some signing problem, especially with third-party S3-compatible object + stores. + +This is a very, very rare occurrence. + +If the problem is a signing one, try changing the signature algorithm. 
+ +```xml + + fs.s3a.signing-algorithm + S3SignerType + +``` + +We cannot make any promises that it will work, only that it has been known to +make the problem go away "once" + +### `AWSS3IOException` The Content-MD5 you specified did not match what we received + +Reads work, but writes, even `mkdir`, fail: + +``` +org.apache.hadoop.fs.s3a.AWSS3IOException: copyFromLocalFile(file:/tmp/hello.txt, s3a://bucket/hello.txt) + on file:/tmp/hello.txt: + The Content-MD5 you specified did not match what we received. + (Service: Amazon S3; Status Code: 400; Error Code: BadDigest; Request ID: 4018131225), + S3 Extended Request ID: null + at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:127) + at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:69) + at org.apache.hadoop.fs.s3a.S3AFileSystem.copyFromLocalFile(S3AFileSystem.java:1494) + at org.apache.hadoop.tools.cloudup.Cloudup.uploadOneFile(Cloudup.java:466) + at org.apache.hadoop.tools.cloudup.Cloudup.access$000(Cloudup.java:63) + at org.apache.hadoop.tools.cloudup.Cloudup$1.call(Cloudup.java:353) + at org.apache.hadoop.tools.cloudup.Cloudup$1.call(Cloudup.java:350) + at java.util.concurrent.FutureTask.run(FutureTask.java:266) + at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) + at java.util.concurrent.FutureTask.run(FutureTask.java:266) + at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) + at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) + at java.lang.Thread.run(Thread.java:748) +Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: + The Content-MD5 you specified did not match what we received. + (Service: Amazon S3; Status Code: 400; Error Code: BadDigest; Request ID: 4018131225), + S3 Extended Request ID: null + at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1307) + at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:894) + at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:597) + at com.amazonaws.http.AmazonHttpClient.doExecute(AmazonHttpClient.java:363) + at com.amazonaws.http.AmazonHttpClient.executeWithTimer(AmazonHttpClient.java:329) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:308) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3659) + at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1422) + at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:131) + at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:123) + at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:139) + at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:47) + at org.apache.hadoop.fs.s3a.BlockingThreadPoolExecutorService$CallableWithPermitRelease.call(BlockingThreadPoolExecutorService.java:239) + ... 4 more +``` + +This stack trace was seen when interacting with a third-party S3 store whose +expectations of headers related to the AWS V4 signing mechanism was not +compatible with that of the specific AWS SDK Hadoop was using. + +Workaround: revert to V2 signing. + +```xml + + fs.s3a.signing-algorithm + S3SignerType + +``` + +### When writing data: "java.io.FileNotFoundException: Completing multi-part upload" + +A multipart upload was trying to complete, but failed as there was no upload +with that ID. 
+ +``` +java.io.FileNotFoundException: Completing multi-part upload on fork-5/test/multipart/1c397ca6-9dfb-4ac1-9cf7-db666673246b: + com.amazonaws.services.s3.model.AmazonS3Exception: The specified upload does not exist. + The upload ID may be invalid, or the upload may have been aborted or completed. + (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload; + at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1182) + at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:770) + at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785) + at com.amazonaws.services.s3.AmazonS3Client.completeMultipartUpload(AmazonS3Client.java:2705) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.complete(S3ABlockOutputStream.java:473) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.access$200(S3ABlockOutputStream.java:382) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.close(S3ABlockOutputStream.java:272) + at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72) + at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) +``` + +This can happen when all outstanding uploads have been aborted, including the +active ones. + +If the bucket has a lifecycle policy of deleting multipart uploads, make sure +that the expiry time of the deletion is greater than that required for all open +writes to complete the write, +*and for all jobs using the S3A committers to commit their work.* + +### Application hangs after reading a number of files + +The pool of https client connections and/or IO threads have been used up, and +none are being freed. + +1. The pools aren't big enough. + See ["Timeout waiting for connection from pool"](#timeout_from_pool) +2. Likely root cause: whatever code is reading files isn't calling `close()` + on the input streams. Make sure your code does this! + And if it's someone else's: make sure you have a recent version; search their + issue trackers to see if its a known/fixed problem. If not, it's time to work + with the developers, or come up with a workaround + (i.e closing the input stream yourself). 
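To make sure `close()` is always called, wrap the stream in try-with-resources. The sketch below is illustrative only (the bucket and path are placeholders), but the pattern applies to any code reading from S3A:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadAndAlwaysClose {
  public static void main(String[] args) throws Exception {
    Path path = new Path("s3a://example-bucket/data/file.txt");
    FileSystem fs = path.getFileSystem(new Configuration());
    // try-with-resources closes the stream -- and so frees the pooled
    // HTTP connection -- even if the read throws.
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
      reader.lines().forEach(System.out::println);
    }
  }
}
```

Closing the stream promptly is what returns the HTTP connection to the pool, so leaked streams show up as exactly the hangs described above.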
+ +### Issue: when writing data, HTTP Exceptions logged at info from `AmazonHttpClient` + +``` +[s3a-transfer-shared-pool4-t6] INFO http.AmazonHttpClient (AmazonHttpClient.java:executeHelper(496)) + - Unable to execute HTTP request: hwdev-steve-ireland-new.s3.amazonaws.com:443 failed to respond +org.apache.http.NoHttpResponseException: bucket.s3.amazonaws.com:443 failed to respond + at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:143) + at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:57) + at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:261) + at org.apache.http.impl.AbstractHttpClientConnection.receiveResponseHeader(AbstractHttpClientConnection.java:283) + at org.apache.http.impl.conn.DefaultClientConnection.receiveResponseHeader(DefaultClientConnection.java:259) + at org.apache.http.impl.conn.ManagedClientConnectionImpl.receiveResponseHeader(ManagedClientConnectionImpl.java:209) + at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:272) + at com.amazonaws.http.protocol.SdkHttpRequestExecutor.doReceiveResponse(SdkHttpRequestExecutor.java:66) + at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:124) + at org.apache.http.impl.client.DefaultRequestDirector.tryExecute(DefaultRequestDirector.java:686) + at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:488) + at org.apache.http.impl.client.AbstractHttpClient.doExecute(AbstractHttpClient.java:884) + at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) + at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:55) + at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:728) + at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785) + at com.amazonaws.services.s3.AmazonS3Client.copyPart(AmazonS3Client.java:1731) + at com.amazonaws.services.s3.transfer.internal.CopyPartCallable.call(CopyPartCallable.java:41) + at com.amazonaws.services.s3.transfer.internal.CopyPartCallable.call(CopyPartCallable.java:28) + at org.apache.hadoop.fs.s3a.BlockingThreadPoolExecutorService$CallableWithPermitRelease.call(BlockingThreadPoolExecutorService.java:239) + at java.util.concurrent.FutureTask.run(FutureTask.java:266) + at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) + at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) + at java.lang.Thread.run(Thread.java:745) +``` + +These are HTTP I/O exceptions caught and logged inside the AWS SDK. The client +will attempt to retry the operation; it may just be a transient event. If there +are many such exceptions in logs, it may be a symptom of connectivity or network +problems. + +### `AWSBadRequestException` IllegalLocationConstraintException/The unspecified location constraint is incompatible -## Other Issues +``` + Cause: org.apache.hadoop.fs.s3a.AWSBadRequestException: put on : + com.amazonaws.services.s3.model.AmazonS3Exception: + The unspecified location constraint is incompatible for the region specific + endpoint this request was sent to. 
+ (Service: Amazon S3; Status Code: 400; Error Code: IllegalLocationConstraintException; + + at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:178) + at org.apache.hadoop.fs.s3a.S3ALambda.execute(S3ALambda.java:64) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.uploadObject(WriteOperationHelper.java:451) + at org.apache.hadoop.fs.s3a.commit.magic.MagicCommitTracker.aboutToComplete(MagicCommitTracker.java:128) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.close(S3ABlockOutputStream.java:373) + at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72) + at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101) + at org.apache.hadoop.hive.ql.io.orc.WriterImpl.close(WriterImpl.java:2429) + at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:106) + at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:91) + ... + Cause: com.amazonaws.services.s3.model.AmazonS3Exception: + The unspecified location constraint is incompatible for the region specific endpoint + this request was sent to. (Service: Amazon S3; Status Code: 400; Error Code: IllegalLocationConstraintException; + Request ID: EEBC5A08BCB3A645) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1588) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1258) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667) + at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221) + ... +``` + +Something has been trying to write data to "/". + +## Best Practises ### Enabling low-level logging @@ -1444,10 +1540,20 @@ http.headers (LoggingManagedHttpClientConnection.java:onResponseReceived(127)) - http.headers (LoggingManagedHttpClientConnection.java:onResponseReceived(127)) - http-outgoing-0 << Content-Length: 0 http.headers (LoggingManagedHttpClientConnection.java:onResponseReceived(127)) - http-outgoing-0 << Server: AmazonS3 execchain.MainClientExec (MainClientExec.java:execute(284)) - Connection can be kept alive for 60000 MILLISECONDS + ``` +### Enable S3 Server-side Logging + +The [Auditing](auditing) feature of the S3A connector can be used to generate +S3 Server Logs with information which can be used to debug problems +working with S3, such as throttling events. + +Consult the [auditing documentation](auditing) documentation. +As auditing is enabled by default, enabling S3 Logging for a bucket +should be sufficient to collect these logs. -## Reducing failures by configuring retry policy +### Reducing failures by configuring retry policy The S3A client can ba configured to retry those operations which are considered retryable. 
That can be because they are idempotent, or diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java index abd637a5240cb..761dd558063ba 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java @@ -18,13 +18,25 @@ package org.apache.hadoop.fs.s3a; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + import com.amazonaws.ClientConfiguration; import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.util.AwsHostNameUtils; import org.assertj.core.api.Assertions; import org.junit.Test; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; + import static org.apache.hadoop.fs.s3a.Constants.AWS_REGION; +import static org.apache.hadoop.fs.s3a.Constants.AWS_S3_CENTRAL_REGION; +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_ENDPOINT; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.AWS_REGION_SYSPROP; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Test to check correctness of S3A endpoint regions in @@ -36,6 +48,7 @@ public class ITestS3AEndpointRegion extends AbstractS3ATestBase { private static final String AWS_ENDPOINT_TEST = "test-endpoint"; private static final String AWS_ENDPOINT_TEST_WITH_REGION = "test-endpoint.some-region.amazonaws.com"; + public static final String MARS_NORTH_2 = "mars-north-2"; /** * Test to verify that setting a region with the config would bypass the @@ -88,4 +101,90 @@ private AwsClientBuilder.EndpointConfiguration createEpr(String endpoint, return DefaultS3ClientFactory.createEndpointConfiguration(endpoint, new ClientConfiguration(), awsRegion); } + + + @Test + public void testInvalidRegionDefaultEndpoint() throws Throwable { + describe("Create a client with an invalid region and the default endpoint"); + Configuration conf = getConfiguration(); + // we are making a big assumption about the timetable for AWS + // region rollout. + // if this test ever fails because this region now exists + // -congratulations! + conf.set(AWS_REGION, MARS_NORTH_2); + createMarsNorth2Client(conf); + } + + @Test + public void testUnsetRegionDefaultEndpoint() throws Throwable { + describe("Create a client with no region and the default endpoint"); + Configuration conf = getConfiguration(); + conf.unset(AWS_REGION); + createS3Client(conf, DEFAULT_ENDPOINT, AWS_S3_CENTRAL_REGION); + } + + /** + * By setting the system property {@code "aws.region"} we can + * guarantee that the SDK region resolution chain will always succeed + * (and fast). + * Clearly there is no validation of the region during the build process. + */ + @Test + public void testBlankRegionTriggersSDKResolution() throws Throwable { + describe("Create a client with a blank region and the default endpoint." + + " This will trigger the SDK Resolution chain"); + Configuration conf = getConfiguration(); + conf.set(AWS_REGION, ""); + System.setProperty(AWS_REGION_SYSPROP, MARS_NORTH_2); + try { + createMarsNorth2Client(conf); + } finally { + System.clearProperty(AWS_REGION_SYSPROP); + } + } + + /** + * Create an S3 client bonded to an invalid region; + * verify that calling {@code getRegion()} triggers + * a failure. 
+ * @param conf configuration to use in the building. + */ + private void createMarsNorth2Client(Configuration conf) throws Exception { + AmazonS3 client = createS3Client(conf, DEFAULT_ENDPOINT, MARS_NORTH_2); + intercept(IllegalArgumentException.class, MARS_NORTH_2, client::getRegion); + } + + /** + * Create an S3 client with the given conf and endpoint. + * The region name must then match that of the expected + * value. + * @param conf configuration to use. + * @param endpoint endpoint. + * @param expectedRegion expected region + * @return the client. + * @throws URISyntaxException parse problems. + * @throws IOException IO problems + */ + private AmazonS3 createS3Client(Configuration conf, + String endpoint, + String expectedRegion) + throws URISyntaxException, IOException { + + DefaultS3ClientFactory factory + = new DefaultS3ClientFactory(); + factory.setConf(conf); + S3ClientFactory.S3ClientCreationParameters parameters + = new S3ClientFactory.S3ClientCreationParameters() + .withCredentialSet(new AnonymousAWSCredentialsProvider()) + .withEndpoint(endpoint) + .withMetrics(new EmptyS3AStatisticsContext() + .newStatisticsFromAwsSdk()); + AmazonS3 client = factory.createS3Client( + new URI("s3a://localhost/"), + parameters); + Assertions.assertThat(client.getRegionName()) + .describedAs("Client region name") + .isEqualTo(expectedRegion); + return client; + } } From 8d99d3abcf2ade67d7e013499797b1be3dc395bc Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Fri, 25 Jun 2021 01:07:59 +0800 Subject: [PATCH 441/666] HDFS-16044. Fix getListing call getLocatedBlocks even source is a directory. Contributed by ludun. --- .../hadoop/hdfs/protocol/HdfsFileStatus.java | 2 +- .../hdfs/protocol/TestHdfsFileStatus.java | 49 +++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index 264e3f4050fd7..3b12622484db1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -274,7 +274,7 @@ public Builder locations(LocatedBlocks locations) { * @return An {@link HdfsFileStatus} instance from these parameters. */ public HdfsFileStatus build() { - if (null == locations && !isdir && null == symlink) { + if (null == locations && isdir && null == symlink) { return new HdfsNamedFileStatus(length, isdir, replication, blocksize, mtime, atime, permission, flags, owner, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy, ecPolicy); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java new file mode 100644 index 0000000000000..67f26f0e7f1d1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Test {@link HdfsFileStatus}. + */ +public class TestHdfsFileStatus { + private boolean createFileStatus(boolean isdir) { + HdfsFileStatus status = new HdfsFileStatus.Builder() + .isdir(isdir) + .build(); + + return status instanceof HdfsLocatedFileStatus; + } + + @Test + public void testHdfsFileStatusBuild() { + // listing directory + assertFalse("Status of directory should not be " + + "HdfsLocatedFileStatus", + createFileStatus(true)); + + // listing file when locations is null + assertTrue("Status of file should be HdfsLocatedFileStatus", + createFileStatus(false)); + } +} + From 1d42abd30c62d9fc865ae651b1525b59b7ef8c3e Mon Sep 17 00:00:00 2001 From: Peter Bacsko Date: Thu, 24 Jun 2021 20:15:10 +0200 Subject: [PATCH 442/666] YARN-10780. Optimise retrieval of configured node labels in CS queues. Contributed by Andras Gyori. --- .../scheduler/capacity/AbstractCSQueue.java | 40 +++++++--- .../capacity/AutoCreatedLeafQueue.java | 20 ++++- .../scheduler/capacity/CSQueueUtils.java | 9 +-- .../CapacitySchedulerConfiguration.java | 36 +++++++++ .../CapacitySchedulerQueueManager.java | 13 ++++ .../capacity/ConfiguredNodeLabels.java | 77 +++++++++++++++++++ .../capacity/ManagedParentQueue.java | 16 ++-- .../scheduler/capacity/ParentQueue.java | 5 +- ...CapacitySchedulerNewQueueAutoCreation.java | 15 ++++ .../capacity/TestReservationQueue.java | 5 ++ 10 files changed, 210 insertions(+), 26 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ConfiguredNodeLabels.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 94ade5e3fabce..2f6ca5a15970c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -76,6 +76,7 @@ import java.util.Set; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.UNDEFINED; public abstract class AbstractCSQueue implements CSQueue { @@ -95,6 +96,7 @@ public abstract class AbstractCSQueue implements CSQueue { final ResourceCalculator resourceCalculator; Set accessibleLabels; + protected Set configuredNodeLabels; Set resourceTypes; final RMNodeLabelsManager labelManager; String defaultLabelExpression; @@ -208,7 +210,7 @@ protected void setupConfigurableCapacities() { protected void setupConfigurableCapacities( CapacitySchedulerConfiguration configuration) { CSQueueUtils.loadCapacitiesByLabelsFromConf(getQueuePath(), queueCapacities, - configuration); + configuration, configuredNodeLabels); } @Override @@ -360,7 +362,7 @@ protected void setupQueueConfigs(Resource clusterResource, writeLock.lock(); try { - if (isDynamicQueue()) { + if (isDynamicQueue() || this instanceof AbstractAutoCreatedLeafQueue) { setDynamicQueueProperties(configuration); } // get labels @@ -386,6 +388,17 @@ protected void setupQueueConfigs(Resource clusterResource, this.defaultLabelExpression = parent.getDefaultNodeLabelExpression(); } + if (csContext.getCapacitySchedulerQueueManager() != null + && csContext.getCapacitySchedulerQueueManager() + .getConfiguredNodeLabels() != null) { + this.configuredNodeLabels = csContext.getCapacitySchedulerQueueManager() + .getConfiguredNodeLabels().getLabelsByQueue(getQueuePath()); + } else { + // Fallback to suboptimal but correct logic + this.configuredNodeLabels = csContext.getConfiguration() + .getConfiguredNodeLabels(queuePath); + } + // After we setup labels, we can setup capacities setupConfigurableCapacities(configuration); updateAbsoluteCapacities(); @@ -487,6 +500,19 @@ protected void setDynamicQueueProperties( if (getParent() instanceof ParentQueue) { ((ParentQueue) getParent()).getAutoCreatedQueueTemplate() .setTemplateEntriesForChild(configuration, getQueuePath()); + + String parentTemplate = String.format("%s.%s", getParent().getQueuePath(), + AutoCreatedQueueTemplate.AUTO_QUEUE_TEMPLATE_PREFIX); + parentTemplate = parentTemplate.substring(0, parentTemplate.lastIndexOf( + DOT)); + Set parentNodeLabels = csContext + .getCapacitySchedulerQueueManager().getConfiguredNodeLabels() + .getLabelsByQueue(parentTemplate); + + if (parentNodeLabels != null && parentNodeLabels.size() > 1) { + csContext.getCapacitySchedulerQueueManager().getConfiguredNodeLabels() + .setLabelsByQueue(queuePath, new HashSet<>(parentNodeLabels)); + } } } @@ -571,10 +597,7 @@ protected boolean checkConfigTypeIsAbsoluteResource(String queuePath, protected void updateConfigurableResourceRequirement(String queuePath, Resource clusterResource) { - CapacitySchedulerConfiguration conf = csContext.getConfiguration(); - Set configuredNodelabels = conf.getConfiguredNodeLabels(queuePath); - - for (String label : configuredNodelabels) { + for (String label : configuredNodeLabels) { Resource minResource = getMinimumAbsoluteResource(queuePath, label); Resource maxResource = getMaximumAbsoluteResource(queuePath, label); @@ -1578,9 +1601,7 @@ private void deriveCapacityFromAbsoluteConfigurations(String label, } void updateEffectiveResources(Resource clusterResource) { - Set configuredNodelabels = - csContext.getConfiguration().getConfiguredNodeLabels(getQueuePath()); - for (String label : configuredNodelabels) { + for (String label : configuredNodeLabels) { Resource resourceByLabel = labelManager.getResourceByLabel(label, clusterResource); @@ -1715,5 +1736,4 @@ public void 
setLastSubmittedTimestamp(long lastSubmittedTimestamp) { writeLock.unlock(); } } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java index dd77a8088c033..7311be77baaa1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueue.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement; @@ -28,6 +27,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.HashSet; +import java.util.Set; /** * Leaf queues which are auto created by an underlying implementation of @@ -122,6 +123,23 @@ public void validateConfigurations(AutoCreatedLeafQueueConfig template) } } + @Override + protected void setDynamicQueueProperties( + CapacitySchedulerConfiguration configuration) { + String parentTemplate = String.format("%s.%s", getParent().getQueuePath(), + CapacitySchedulerConfiguration + .AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX); + Set parentNodeLabels = csContext + .getCapacitySchedulerQueueManager().getConfiguredNodeLabels() + .getLabelsByQueue(parentTemplate); + + if (parentNodeLabels != null && parentNodeLabels.size() > 1) { + csContext.getCapacitySchedulerQueueManager().getConfiguredNodeLabels() + .setLabelsByQueue(getQueuePath(), + new HashSet<>(parentNodeLabels)); + } + } + private void validate(final CSQueue newlyParsedQueue) throws IOException { if (!(newlyParsedQueue instanceof AutoCreatedLeafQueue) || !newlyParsedQueue .getQueuePath().equals(getQueuePath())) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index e2865cf5c1bcb..f43ea30001788 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -66,13 +66,12 @@ public static float computeAbsoluteMaximumCapacity( return (parentAbsMaxCapacity * maximumCapacity); } - public static void loadCapacitiesByLabelsFromConf(String queuePath, - QueueCapacities queueCapacities, CapacitySchedulerConfiguration csConf) { + public static void 
loadCapacitiesByLabelsFromConf( + String queuePath, QueueCapacities queueCapacities, + CapacitySchedulerConfiguration csConf, Set nodeLabels) { queueCapacities.clearConfigurableFields(); - Set configuredNodelabels = - csConf.getConfiguredNodeLabels(queuePath); - for (String label : configuredNodelabels) { + for (String label : nodeLabels) { if (label.equals(CommonNodeLabelsManager.NO_LABEL)) { queueCapacities.setCapacity(label, csConf.getNonLabeledQueueCapacity(queuePath) / 100); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 622fef61ffc54..4afa2e7419221 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -1564,6 +1564,42 @@ public Set getConfiguredNodeLabels(String queuePath) { return configuredNodeLabels; } + /** + * Get configured node labels for all queues that have accessible-node-labels + * prefixed properties set. + * @return configured node labels + */ + public Map> getConfiguredNodeLabelsByQueue() { + Map> labelsByQueue = new HashMap<>(); + Map schedulerEntries = getPropsWithPrefix( + CapacitySchedulerConfiguration.PREFIX); + + for (Map.Entry propertyEntry + : schedulerEntries.entrySet()) { + String key = propertyEntry.getKey(); + // Consider all keys that has accessible-node-labels prefix, excluding + // .accessible-node-labels itself + if (key.contains(ACCESSIBLE_NODE_LABELS + DOT)) { + // Find in + // .accessible-node-labels..property + int labelStartIdx = + key.indexOf(ACCESSIBLE_NODE_LABELS) + + ACCESSIBLE_NODE_LABELS.length() + 1; + int labelEndIndx = key.indexOf('.', labelStartIdx); + String labelName = key.substring(labelStartIdx, labelEndIndx); + // Find queuePath and exclude "." 
at the end + String queuePath = key.substring(0, key.indexOf( + ACCESSIBLE_NODE_LABELS) - 1); + if (!labelsByQueue.containsKey(queuePath)) { + labelsByQueue.put(queuePath, new HashSet<>()); + labelsByQueue.get(queuePath).add(RMNodeLabelsManager.NO_LABEL); + } + labelsByQueue.get(queuePath).add(labelName); + } + } + return labelsByQueue; + } + public Integer getDefaultApplicationPriorityConfPerQueue(String queue) { Integer defaultPriority = getInt(getQueuePrefix(queue) + DEFAULT_APPLICATION_PRIORITY, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index a86a2536d0584..e5b41cede3ae2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -84,6 +84,7 @@ public CSQueue hook(CSQueue queue) { private QueueStateManager queueStateManager; + private ConfiguredNodeLabels configuredNodeLabels; /** * Construct the service. @@ -98,6 +99,7 @@ public CapacitySchedulerQueueManager(Configuration conf, this.labelManager = labelManager; this.queueStateManager = new QueueStateManager<>(); this.appPriorityACLManager = appPriorityACLManager; + this.configuredNodeLabels = new ConfiguredNodeLabels(); } @Override @@ -165,6 +167,7 @@ public void setCapacitySchedulerContext( */ public void initializeQueues(CapacitySchedulerConfiguration conf) throws IOException { + configuredNodeLabels = new ConfiguredNodeLabels(conf); root = parseQueue(this.csContext, conf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, NOOP); setQueueAcls(authorizer, appPriorityACLManager, queues); @@ -180,6 +183,7 @@ public void reinitializeQueues(CapacitySchedulerConfiguration newConf) throws IOException { // Parse new queues CSQueueStore newQueues = new CSQueueStore(); + configuredNodeLabels = new ConfiguredNodeLabels(newConf); CSQueue newRoot = parseQueue(this.csContext, newConf, null, CapacitySchedulerConfiguration.ROOT, newQueues, queues, NOOP); @@ -619,6 +623,15 @@ public List determineMissingParents( return parentsToCreate; } + /** + * Get {@code ConfiguredNodeLabels} which contains the configured node labels + * for all queues. 
+ * @return configured node labels + */ + public ConfiguredNodeLabels getConfiguredNodeLabels() { + return configuredNodeLabels; + } + private LeafQueue createAutoQueue(ApplicationPlacementContext queue) throws SchedulerDynamicEditException { List parentsToCreate = determineMissingParents(queue); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ConfiguredNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ConfiguredNodeLabels.java new file mode 100644 index 0000000000000..ba82c6a450f11 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ConfiguredNodeLabels.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * Contains node labels for all queues extracted from configuration properties. + * A queue has a configured node label if it has a property set with an + * accessible-node-labels prefix. + * Example: + * yarn.scheduler.capacity.root.accessible-node-labels.test-label.capacity + */ +public class ConfiguredNodeLabels { + private final Map> configuredNodeLabelsByQueue; + private static final Set NO_LABEL = + ImmutableSet.of(RMNodeLabelsManager.NO_LABEL); + + public ConfiguredNodeLabels() { + configuredNodeLabelsByQueue = new HashMap<>(); + } + + public ConfiguredNodeLabels( + CapacitySchedulerConfiguration conf) { + this.configuredNodeLabelsByQueue = conf.getConfiguredNodeLabelsByQueue(); + } + + /** + * Returns a set of configured node labels for a queue. If no labels are set + * for a queue, it defaults to a one element immutable collection containing + * empty label. + * @param queuePath path of the queue + * @return configured node labels or an immutable set containing the empty + * label + */ + public Set getLabelsByQueue(String queuePath) { + Set labels = configuredNodeLabelsByQueue.get(queuePath); + + if (labels == null) { + return NO_LABEL; + } + + return ImmutableSet.copyOf(labels); + } + + /** + * Set node labels for a specific queue. + * @param queuePath path of the queue + * @param nodeLabels configured node labels to set + */ + public void setLabelsByQueue( + String queuePath, Collection nodeLabels) { + configuredNodeLabelsByQueue.put(queuePath, new HashSet<>(nodeLabels)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ManagedParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ManagedParentQueue.java index 88fae00f1b459..e415ac1279585 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ManagedParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ManagedParentQueue.java @@ -37,6 +37,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; /** * Auto Creation enabled Parent queue. 
This queue initially does not have any @@ -164,10 +165,13 @@ protected AutoCreatedLeafQueueConfig.Builder initializeLeafQueueConfigs() throws CapacitySchedulerConfiguration conf = super.initializeLeafQueueConfigs(leafQueueTemplateConfPrefix); builder.configuration(conf); + String templateQueuePath = csContext.getConfiguration() + .getAutoCreatedQueueTemplateConfPrefix(getQueuePath()); - for (String nodeLabel : conf - .getConfiguredNodeLabels(csContext.getConfiguration() - .getAutoCreatedQueueTemplateConfPrefix(getQueuePath()))) { + Set templateConfiguredNodeLabels = csContext + .getCapacitySchedulerQueueManager().getConfiguredNodeLabels() + .getLabelsByQueue(templateQueuePath); + for (String nodeLabel : templateConfiguredNodeLabels) { Resource templateMinResource = conf.getMinimumResourceRequirement( nodeLabel, csContext.getConfiguration() .getAutoCreatedQueueTemplateConfPrefix(getQueuePath()), @@ -182,10 +186,10 @@ protected AutoCreatedLeafQueueConfig.Builder initializeLeafQueueConfigs() throws //Load template capacities QueueCapacities queueCapacities = new QueueCapacities(false); - CSQueueUtils.loadCapacitiesByLabelsFromConf(csContext.getConfiguration() - .getAutoCreatedQueueTemplateConfPrefix(getQueuePath()), + CSQueueUtils.loadCapacitiesByLabelsFromConf(templateQueuePath, queueCapacities, - csContext.getConfiguration()); + csContext.getConfiguration(), + templateConfiguredNodeLabels); /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index ee6015505a756..8ff4d911a541b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -25,7 +25,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; @@ -1258,9 +1257,7 @@ public void updateClusterResource(Resource clusterResource, } // Update effective capacity in all parent queue. 
- Set configuredNodelabels = csContext.getConfiguration() - .getConfiguredNodeLabels(getQueuePath()); - for (String label : configuredNodelabels) { + for (String label : configuredNodeLabels) { calculateEffectiveResourcesAndCapacity(label, clusterResource); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index 7d0d537b49ee7..4229676e60b61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -706,6 +706,21 @@ public void testAutoCreatedQueueTemplateConfig() throws Exception { LeafQueue a3 = createQueue("root.a.a3"); Assert.assertFalse("auto queue deletion should be turned off on a3", a3.isEligibleForAutoDeletion()); + + // Set the capacity of label TEST + csConf.set(AutoCreatedQueueTemplate.getAutoQueueTemplatePrefix( + "root.c") + "accessible-node-labels.TEST.capacity", "6w"); + csConf.setQueues("root", new String[]{"a", "b", "c"}); + csConf.setAutoQueueCreationV2Enabled("root.c", true); + cs.reinitialize(csConf, mockRM.getRMContext()); + LeafQueue c1 = createQueue("root.c.c1"); + Assert.assertEquals("weight is not set for label TEST", 6f, + c1.getQueueCapacities().getWeight("TEST"), 1e-6); + cs.reinitialize(csConf, mockRM.getRMContext()); + c1 = (LeafQueue) cs.getQueue("root.c.c1"); + Assert.assertEquals("weight is not set for label TEST", 6f, + c1.getQueueCapacities().getWeight("TEST"), 1e-6); + } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java index 84de7ccb82f68..8407922b6331d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java @@ -60,7 +60,12 @@ public void setup() throws IOException, SchedulerDynamicEditException { YarnConfiguration conf = new YarnConfiguration(); csContext = mock(CapacitySchedulerContext.class); + CapacitySchedulerQueueManager csQm = mock( + CapacitySchedulerQueueManager.class); + ConfiguredNodeLabels labels = new ConfiguredNodeLabels(csConf); + when(csQm.getConfiguredNodeLabels()).thenReturn(labels); when(csContext.getConfiguration()).thenReturn(csConf); + when(csContext.getCapacitySchedulerQueueManager()).thenReturn(csQm); when(csContext.getConf()).thenReturn(conf); 
when(csContext.getMinimumResourceCapability()).thenReturn( Resources.createResource(GB, 1)); From 6a67b4a98992d440b6d6d1b8df9bba8dd090a265 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Fri, 25 Jun 2021 22:38:34 +0800 Subject: [PATCH 443/666] Revert "HDFS-16044. Fix getListing call getLocatedBlocks even source is a directory. Contributed by ludun." This reverts commit 0d078377120da9ea886bd95b19c8a618dc4d7ab5. --- .../hadoop/hdfs/protocol/HdfsFileStatus.java | 2 +- .../hdfs/protocol/TestHdfsFileStatus.java | 49 ------------------- 2 files changed, 1 insertion(+), 50 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index 3b12622484db1..264e3f4050fd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -274,7 +274,7 @@ public Builder locations(LocatedBlocks locations) { * @return An {@link HdfsFileStatus} instance from these parameters. */ public HdfsFileStatus build() { - if (null == locations && isdir && null == symlink) { + if (null == locations && !isdir && null == symlink) { return new HdfsNamedFileStatus(length, isdir, replication, blocksize, mtime, atime, permission, flags, owner, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy, ecPolicy); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java deleted file mode 100644 index 67f26f0e7f1d1..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatus.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.protocol; - -import org.junit.Test; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Test {@link HdfsFileStatus}. 
- */ -public class TestHdfsFileStatus { - private boolean createFileStatus(boolean isdir) { - HdfsFileStatus status = new HdfsFileStatus.Builder() - .isdir(isdir) - .build(); - - return status instanceof HdfsLocatedFileStatus; - } - - @Test - public void testHdfsFileStatusBuild() { - // listing directory - assertFalse("Status of directory should not be " + - "HdfsLocatedFileStatus", - createFileStatus(true)); - - // listing file when locations is null - assertTrue("Status of file should be HdfsLocatedFileStatus", - createFileStatus(false)); - } -} - From b106f39e5dd8cbeb6648befa698fb7cc5ade5da9 Mon Sep 17 00:00:00 2001 From: Zamil Majdy <76959103+majdyz@users.noreply.github.com> Date: Sat, 26 Jun 2021 02:01:48 +0700 Subject: [PATCH 444/666] HADOOP-17764. S3AInputStream read does not re-open the input stream on the second read retry attempt (#3109) Contributed by Zamil Majdy. --- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 5 +- .../apache/hadoop/fs/s3a/S3AInputStream.java | 8 +- .../fs/s3a/TestS3AInputStreamRetry.java | 209 ++++++++++++++++++ 3 files changed, 217 insertions(+), 5 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 439d52edc14f5..1522432c9f5d4 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -1439,10 +1439,13 @@ public S3Object getObject(GetObjectRequest request) { * using FS state as well as the status. * @param fileStatus file status. * @param seekPolicy input policy for this operation + * @param changePolicy change policy for this operation. * @param readAheadRange readahead value. + * @param auditSpan audit span. * @return a context for read and select operations. 
*/ - private S3AReadOpContext createReadContext( + @VisibleForTesting + protected S3AReadOpContext createReadContext( final FileStatus fileStatus, final S3AInputPolicy seekPolicy, final ChangeDetectionPolicy changePolicy, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index d56d4ac433eb7..b65dcc95293ce 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -424,10 +424,10 @@ public synchronized int read() throws IOException { return -1; } catch (SocketTimeoutException e) { onReadFailure(e, 1, true); - b = wrappedStream.read(); + throw e; } catch (IOException e) { onReadFailure(e, 1, false); - b = wrappedStream.read(); + throw e; } return b; }); @@ -513,10 +513,10 @@ public synchronized int read(byte[] buf, int off, int len) return -1; } catch (SocketTimeoutException e) { onReadFailure(e, len, true); - bytes = wrappedStream.read(buf, off, len); + throw e; } catch (IOException e) { onReadFailure(e, len, false); - bytes= wrappedStream.read(buf, off, len); + throw e; } return bytes; }); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java new file mode 100644 index 0000000000000..05a07ce444c92 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import javax.net.ssl.SSLException; +import java.io.IOException; +import java.net.SocketException; +import java.nio.charset.Charset; + +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectInputStream; +import org.junit.Test; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.audit.impl.NoopSpan; +import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; +import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; + +import static java.lang.Math.min; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +/** + * Tests S3AInputStream retry behavior on read failure. 
+ * These tests are for validating expected behavior of retrying the + * S3AInputStream read() and read(b, off, len), it tests that the read should + * reopen the input stream and retry the read when IOException is thrown + * during the read process. + */ +public class TestS3AInputStreamRetry extends AbstractS3AMockTest { + + private static final String INPUT = "ab"; + + @Test + public void testInputStreamReadRetryForException() throws IOException { + S3AInputStream s3AInputStream = getMockedS3AInputStream(); + + assertEquals("'a' from the test input stream 'ab' should be the first " + + "character being read", INPUT.charAt(0), s3AInputStream.read()); + assertEquals("'b' from the test input stream 'ab' should be the second " + + "character being read", INPUT.charAt(1), s3AInputStream.read()); + } + + @Test + public void testInputStreamReadLengthRetryForException() throws IOException { + byte[] result = new byte[INPUT.length()]; + S3AInputStream s3AInputStream = getMockedS3AInputStream(); + s3AInputStream.read(result, 0, INPUT.length()); + + assertArrayEquals( + "The read result should equals to the test input stream content", + INPUT.getBytes(), result); + } + + @Test + public void testInputStreamReadFullyRetryForException() throws IOException { + byte[] result = new byte[INPUT.length()]; + S3AInputStream s3AInputStream = getMockedS3AInputStream(); + s3AInputStream.readFully(0, result); + + assertArrayEquals( + "The read result should equals to the test input stream content", + INPUT.getBytes(), result); + } + + private S3AInputStream getMockedS3AInputStream() { + Path path = new Path("test-path"); + String eTag = "test-etag"; + String versionId = "test-version-id"; + String owner = "test-owner"; + + S3AFileStatus s3AFileStatus = new S3AFileStatus( + INPUT.length(), 0, path, INPUT.length(), owner, eTag, versionId); + + S3ObjectAttributes s3ObjectAttributes = new S3ObjectAttributes( + fs.getBucket(), + path, + fs.pathToKey(path), + fs.getServerSideEncryptionAlgorithm(), + new EncryptionSecrets().getEncryptionKey(), + eTag, + versionId, + INPUT.length()); + + S3AReadOpContext s3AReadOpContext = fs.createReadContext( + s3AFileStatus, S3AInputPolicy.Normal, + ChangeDetectionPolicy.getPolicy(fs.getConf()), 100, NoopSpan.INSTANCE); + + return new S3AInputStream( + s3AReadOpContext, + s3ObjectAttributes, + getMockedInputStreamCallback()); + } + + /** + * Get mocked InputStreamCallbacks where we return mocked S3Object. + * + * @return mocked object. + */ + private S3AInputStream.InputStreamCallbacks getMockedInputStreamCallback() { + return new S3AInputStream.InputStreamCallbacks() { + + private final S3Object mockedS3Object = getMockedS3Object(); + + @Override + public S3Object getObject(GetObjectRequest request) { + // Set s3 client to return mocked s3object with defined read behavior. + return mockedS3Object; + } + + @Override + public GetObjectRequest newGetRequest(String key) { + return new GetObjectRequest(fs.getBucket(), key); + } + + @Override + public void close() { + } + }; + } + + /** + * Get mocked S3Object that returns bad input stream on the initial of + * getObjectContent calls. + * + * @return mocked object. 
+ */ + private S3Object getMockedS3Object() { + S3ObjectInputStream objectInputStreamBad1 = getMockedInputStream(true); + S3ObjectInputStream objectInputStreamBad2 = getMockedInputStream(true); + S3ObjectInputStream objectInputStreamGood = getMockedInputStream(false); + + return new S3Object() { + private final S3ObjectInputStream[] inputStreams = + {objectInputStreamBad1, objectInputStreamBad2, objectInputStreamGood}; + + private Integer inputStreamIndex = 0; + + @Override + public S3ObjectInputStream getObjectContent() { + // Set getObjectContent behavior: + // Returns bad stream twice, and good stream afterwards. + inputStreamIndex++; + return inputStreams[min(inputStreamIndex, inputStreams.length) - 1]; + } + + @Override + public ObjectMetadata getObjectMetadata() { + // Set getObjectMetadata behavior: returns dummy metadata + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setHeader("ETag", "test-etag"); + return metadata; + } + }; + } + + /** + * Get mocked S3ObjectInputStream where we can trigger IOException to + * simulate the read failure. + * + * @param triggerFailure true when a failure injection is enabled. + * @return mocked object. + */ + private S3ObjectInputStream getMockedInputStream(boolean triggerFailure) { + return new S3ObjectInputStream( + IOUtils.toInputStream(INPUT, Charset.defaultCharset()), null) { + + private final IOException exception = + new SSLException(new SocketException("Connection reset")); + + @Override + public int read() throws IOException { + int result = super.read(); + if (triggerFailure) { + throw exception; + } + return result; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int result = super.read(b, off, len); + if (triggerFailure) { + throw exception; + } + return result; + } + }; + } +} From 9b167ef7a75c07218f38168d2363de9cb78136d7 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Fri, 25 Jun 2021 20:32:08 +0000 Subject: [PATCH 445/666] YARN-10824. Title not set for JHS and NM webpages. Contributed by Bilwa S T. 
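Without an explicit title, these pages fall back to whatever default title the
web framework supplies, so the browser tab gives no hint of which daemon is
being viewed. The fix is to call setTitle() from the existing pre-head hooks.
As a rough sketch of what the NodePage hook looks like after the change (the
method and helper names are the ones visible in the diff below; the rest of
the surrounding NMView plumbing is assumed unchanged):

    @Override
    protected void commonPreHead(HTML<__> html) {
      super.commonPreHead(html);
      // Give the page an explicit title instead of the framework default.
      setTitle("NodeManager information");
      set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
    }

HsAboutPage gets the equivalent call, setTitle("About History Server"), in its
own pre-head override.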
--- .../org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java | 1 + .../apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java index d544c6ba7cd25..8b6feab1e7a15 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java @@ -39,6 +39,7 @@ public class HsAboutPage extends HsView { commonPreHead(html); //override the nav config from commonPReHead set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); + setTitle("About History Server"); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java index ae9b92d6e3da1..e0efb1392f333 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java @@ -41,7 +41,7 @@ public class NodePage extends NMView { @Override protected void commonPreHead(HTML<__> html) { super.commonPreHead(html); - + setTitle("NodeManager information"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}"); } From ee5d86940ddac25b277fdc9bb81f02f105c0022f Mon Sep 17 00:00:00 2001 From: Masatake Iwasaki Date: Mon, 21 Jun 2021 12:10:44 +0000 Subject: [PATCH 446/666] HDFS-13916. Distcp SnapshotDiff to support WebHDFS. Contributed by Xun REN. Signed-off-by: Masatake Iwasaki --- .../org/apache/hadoop/tools/DistCpSync.java | 74 +++++--- .../apache/hadoop/tools/TestDistCpSync.java | 162 +++++++++++++++--- 2 files changed, 189 insertions(+), 47 deletions(-) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java index 35ef3e4ab77e7..6cbc936136231 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.tools.CopyListing.InvalidInputException; import java.io.FileNotFoundException; @@ -40,7 +41,8 @@ /** * This class provides the basic functionality to sync two FileSystems based on * the snapshot diff report. More specifically, we have the following settings: - * 1. Both the source and target FileSystem must be DistributedFileSystem + * 1. 
Both the source and target FileSystem must be DistributedFileSystem or + * (s)WebHdfsFileSystem * 2. Two snapshots (e.g., s1 and s2) have been created on the source FS. * The diff between these two snapshots will be copied to the target FS. * 3. The target has the same snapshot s1. No changes have been made on the @@ -73,7 +75,7 @@ private boolean isRdiff() { /** * Check if three conditions are met before sync. * 1. Only one source directory. - * 2. Both source and target file system are DFS. + * 2. Both source and target file system are DFS or WebHdfs. * 3. There is no change between from and the current status in target * file system. * Throw exceptions if first two aren't met, and return false to fallback to @@ -95,17 +97,22 @@ private boolean preSyncCheck() throws IOException { final Path snapshotDiffDir = isRdiff() ? targetDir : sourceDir; // currently we require both the source and the target file system are - // DistributedFileSystem. - if (!(srcFs instanceof DistributedFileSystem) || - !(tgtFs instanceof DistributedFileSystem)) { - throw new IllegalArgumentException("The FileSystems needs to" + - " be DistributedFileSystem for using snapshot-diff-based distcp"); + // DistributedFileSystem or (S)WebHdfsFileSystem. + if (!(srcFs instanceof DistributedFileSystem + || srcFs instanceof WebHdfsFileSystem)) { + throw new IllegalArgumentException("Unsupported source file system: " + + srcFs.getScheme() + "://. " + + "Supported file systems: hdfs://, webhdfs:// and swebhdfs://."); + } + if (!(tgtFs instanceof DistributedFileSystem + || tgtFs instanceof WebHdfsFileSystem)) { + throw new IllegalArgumentException("Unsupported target file system: " + + tgtFs.getScheme() + "://. " + + "Supported file systems: hdfs://, webhdfs:// and swebhdfs://."); } - - final DistributedFileSystem targetFs = (DistributedFileSystem) tgtFs; // make sure targetFS has no change between from and the current states - if (!checkNoChange(targetFs, targetDir)) { + if (!checkNoChange(tgtFs, targetDir)) { // set the source path using the snapshot path context.setSourcePaths(Arrays.asList(getSnapshotPath(sourceDir, context.getToSnapshot()))); @@ -161,23 +168,22 @@ public boolean sync() throws IOException { final Path sourceDir = sourcePaths.get(0); final Path targetDir = context.getTargetPath(); final FileSystem tfs = targetDir.getFileSystem(conf); - final DistributedFileSystem targetFs = (DistributedFileSystem) tfs; Path tmpDir = null; try { - tmpDir = createTargetTmpDir(targetFs, targetDir); + tmpDir = createTargetTmpDir(tfs, targetDir); DiffInfo[] renameAndDeleteDiffs = getRenameAndDeleteDiffsForSync(targetDir); if (renameAndDeleteDiffs.length > 0) { // do the real sync work: deletion and rename - syncDiff(renameAndDeleteDiffs, targetFs, tmpDir); + syncDiff(renameAndDeleteDiffs, tfs, tmpDir); } return true; } catch (Exception e) { DistCp.LOG.warn("Failed to use snapshot diff for distcp", e); return false; } finally { - deleteTargetTmpDir(targetFs, tmpDir); + deleteTargetTmpDir(tfs, tmpDir); // TODO: since we have tmp directory, we can support "undo" with failures // set the source path using the snapshot path context.setSourcePaths(Arrays.asList(getSnapshotPath(sourceDir, @@ -195,12 +201,22 @@ private boolean getAllDiffs() throws IOException { context.getTargetPath() : context.getSourcePaths().get(0); try { - DistributedFileSystem fs = - (DistributedFileSystem) ssDir.getFileSystem(conf); + SnapshotDiffReport report = null; + FileSystem fs = ssDir.getFileSystem(conf); final String from = 
getSnapshotName(context.getFromSnapshot()); final String to = getSnapshotName(context.getToSnapshot()); - SnapshotDiffReport report = fs.getSnapshotDiffReport(ssDir, - from, to); + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem dfs = (DistributedFileSystem)fs; + report = dfs.getSnapshotDiffReport(ssDir, from, to); + } else if (fs instanceof WebHdfsFileSystem) { + WebHdfsFileSystem webHdfs = (WebHdfsFileSystem)fs; + report = webHdfs.getSnapshotDiffReport(ssDir, from, to); + } else { + throw new IllegalArgumentException("Unsupported file system: " + + fs.getScheme() + "://. " + + "Supported file systems: hdfs://, webhdfs:// and swebhdfs://."); + } + this.diffMap = new EnumMap<>(SnapshotDiffReport.DiffType.class); for (SnapshotDiffReport.DiffType type : SnapshotDiffReport.DiffType.values()) { @@ -265,7 +281,7 @@ private Path getSnapshotPath(Path inputDir, String snapshotName) { } } - private Path createTargetTmpDir(DistributedFileSystem targetFs, + private Path createTargetTmpDir(FileSystem targetFs, Path targetDir) throws IOException { final Path tmp = new Path(targetDir, DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt()); @@ -275,7 +291,7 @@ private Path createTargetTmpDir(DistributedFileSystem targetFs, return tmp; } - private void deleteTargetTmpDir(DistributedFileSystem targetFs, + private void deleteTargetTmpDir(FileSystem targetFs, Path tmpDir) { try { if (tmpDir != null) { @@ -290,11 +306,17 @@ private void deleteTargetTmpDir(DistributedFileSystem targetFs, * Compute the snapshot diff on the given file system. Return true if the diff * is empty, i.e., no changes have happened in the FS. */ - private boolean checkNoChange(DistributedFileSystem fs, Path path) { + private boolean checkNoChange(FileSystem fs, Path path) { try { final String from = getSnapshotName(context.getFromSnapshot()); - SnapshotDiffReport targetDiff = - fs.getSnapshotDiffReport(path, from, ""); + SnapshotDiffReport targetDiff = null; + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem dfs = (DistributedFileSystem)fs; + targetDiff = dfs.getSnapshotDiffReport(path, from, ""); + } else { + WebHdfsFileSystem webHdfs = (WebHdfsFileSystem)fs; + targetDiff = webHdfs.getSnapshotDiffReport(path, from, ""); + } if (!targetDiff.getDiffList().isEmpty()) { DistCp.LOG.warn("The target has been modified since snapshot " + context.getFromSnapshot()); @@ -310,7 +332,7 @@ private boolean checkNoChange(DistributedFileSystem fs, Path path) { } private void syncDiff(DiffInfo[] diffs, - DistributedFileSystem targetFs, Path tmpDir) throws IOException { + FileSystem targetFs, Path tmpDir) throws IOException { moveToTmpDir(diffs, targetFs, tmpDir); moveToTarget(diffs, targetFs); } @@ -320,7 +342,7 @@ private void syncDiff(DiffInfo[] diffs, * directory. */ private void moveToTmpDir(DiffInfo[] diffs, - DistributedFileSystem targetFs, Path tmpDir) throws IOException { + FileSystem targetFs, Path tmpDir) throws IOException { // sort the diffs based on their source paths to make sure the files and // subdirs are moved before moving their parents/ancestors. Arrays.sort(diffs, DiffInfo.sourceComparator); @@ -341,7 +363,7 @@ private void moveToTmpDir(DiffInfo[] diffs, * from the tmp dir to the final targets. */ private void moveToTarget(DiffInfo[] diffs, - DistributedFileSystem targetFs) throws IOException { + FileSystem targetFs) throws IOException { // sort the diffs based on their target paths to make sure the parent // directories are created first. 
Arrays.sort(diffs, DiffInfo.targetComparator); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java index c80a79bbb88bf..220caaefdb43c 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java @@ -20,6 +20,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -27,6 +28,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; @@ -39,7 +43,6 @@ import org.junit.Before; import org.junit.Test; - import java.io.IOException; import java.io.FileWriter; import java.io.BufferedWriter; @@ -48,12 +51,14 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; public class TestDistCpSync { private MiniDFSCluster cluster; private final Configuration conf = new HdfsConfiguration(); private DistributedFileSystem dfs; + private WebHdfsFileSystem webfs; private DistCpContext context; private final Path source = new Path("/source"); private final Path target = new Path("/target"); @@ -65,6 +70,9 @@ public void setUp() throws Exception { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATA_NUM).build(); cluster.waitActive(); + webfs = WebHdfsTestUtil. 
+ getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); + dfs = cluster.getFileSystem(); dfs.mkdirs(source); dfs.mkdirs(target); @@ -160,6 +168,10 @@ private boolean sync() throws Exception { * f3 f4 */ private void initData(Path dir) throws Exception { + initData(dfs, dir); + } + + private void initData(FileSystem fs, Path dir) throws Exception { final Path foo = new Path(dir, "foo"); final Path bar = new Path(dir, "bar"); final Path d1 = new Path(foo, "d1"); @@ -169,10 +181,10 @@ private void initData(Path dir) throws Exception { final Path f3 = new Path(d1, "f3"); final Path f4 = new Path(d2, "f4"); - DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0); - DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 0); - DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 0); - DFSTestUtil.createFile(dfs, f4, BLOCK_SIZE, DATA_NUM, 0); + DFSTestUtil.createFile(fs, f1, BLOCK_SIZE, DATA_NUM, 0); + DFSTestUtil.createFile(fs, f2, BLOCK_SIZE, DATA_NUM, 0); + DFSTestUtil.createFile(fs, f3, BLOCK_SIZE, DATA_NUM, 0); + DFSTestUtil.createFile(fs, f4, BLOCK_SIZE, DATA_NUM, 0); } /** @@ -192,7 +204,7 @@ private void initData(Path dir) throws Exception { * foo/ f4 * f1(new) */ - private int changeData(Path dir) throws Exception { + private int changeData(FileSystem fs, Path dir) throws Exception { final Path foo = new Path(dir, "foo"); final Path bar = new Path(dir, "bar"); final Path d1 = new Path(foo, "d1"); @@ -200,21 +212,21 @@ private int changeData(Path dir) throws Exception { final Path bar_d1 = new Path(bar, "d1"); int numCreatedModified = 0; - dfs.rename(d1, bar_d1); + fs.rename(d1, bar_d1); numCreatedModified += 1; // modify ./foo numCreatedModified += 1; // modify ./bar final Path f3 = new Path(bar_d1, "f3"); - dfs.delete(f3, true); + fs.delete(f3, true); final Path newfoo = new Path(bar_d1, "foo"); - dfs.rename(foo, newfoo); + fs.rename(foo, newfoo); numCreatedModified += 1; // modify ./foo/d1 final Path f1 = new Path(newfoo, "f1"); - dfs.delete(f1, true); - DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0); + fs.delete(f1, true); + DFSTestUtil.createFile(fs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0); numCreatedModified += 1; // create ./foo/f1 - DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE); + DFSTestUtil.appendFile(fs, f2, (int) BLOCK_SIZE); numCreatedModified += 1; // modify ./bar/f2 - dfs.rename(bar, new Path(dir, "foo")); + fs.rename(bar, new Path(dir, "foo")); return numCreatedModified; } @@ -228,7 +240,7 @@ public void testSync() throws Exception { enableAndCreateFirstSnapshot(); // make changes under source - int numCreatedModified = changeData(source); + int numCreatedModified = changeData(dfs, source); dfs.createSnapshot(source, "s2"); // before sync, make some further changes on source. this should not affect @@ -295,23 +307,51 @@ private Map getListing(Path listingPath) return values; } + /** + * By default, we are using DFS for both source and target. + * @param s source file status + * @param t target file status + * @param compareName whether will we compare the name of the files + * @throws Exception + */ private void verifyCopy(FileStatus s, FileStatus t, boolean compareName) - throws Exception { + throws Exception { + verifyCopy(dfs, dfs, s, t, compareName); + } + + /** + * Verify copy by using different file systems. 
+ * @param sfs source file system + * @param tfs target file system + * @param s source file status + * @param t target file status + * @param compareName whether will we compare the name of the files + * @throws Exception + */ + private void verifyCopyByFs(FileSystem sfs, FileSystem tfs, + FileStatus s, FileStatus t, boolean compareName) + throws Exception { + verifyCopy(sfs, tfs, s, t, compareName); + } + + private void verifyCopy(FileSystem sfs, FileSystem tfs, + FileStatus s, FileStatus t, boolean compareName) + throws Exception { Assert.assertEquals(s.isDirectory(), t.isDirectory()); if (compareName) { Assert.assertEquals(s.getPath().getName(), t.getPath().getName()); } if (!s.isDirectory()) { // verify the file content is the same - byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath()); - byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath()); + byte[] sbytes = DFSTestUtil.readFileBuffer(sfs, s.getPath()); + byte[] tbytes = DFSTestUtil.readFileBuffer(tfs, t.getPath()); Assert.assertArrayEquals(sbytes, tbytes); } else { - FileStatus[] slist = dfs.listStatus(s.getPath()); - FileStatus[] tlist = dfs.listStatus(t.getPath()); + FileStatus[] slist = sfs.listStatus(s.getPath()); + FileStatus[] tlist = tfs.listStatus(t.getPath()); Assert.assertEquals(slist.length, tlist.length); for (int i = 0; i < slist.length; i++) { - verifyCopy(slist[i], tlist[i], true); + verifyCopy(sfs, tfs, slist[i], tlist[i], true); } } } @@ -333,7 +373,7 @@ public void testSyncWithCurrent() throws Exception { enableAndCreateFirstSnapshot(); // make changes under source - changeData(source); + changeData(dfs, source); // do the sync sync(); @@ -907,4 +947,84 @@ public void testSync11() throws Exception { deleteFilterFile(filterFile); } } + + /** + * Test DistCp ues diff option under (s)WebHDFSFileSyste. + * In this test, we are using DFS as source and WebHDFS as target + */ + @Test + public void testSyncSnapshotDiffWithWebHdfs1() throws Exception { + Path dfsSource = new Path(dfs.getUri().toString(), source); + Path webHdfsTarget = new Path(webfs.getUri().toString(), target); + + snapshotDiffWithPaths(dfsSource, webHdfsTarget); + } + + /** + * Test DistCp ues diff option under (s)WebHDFSFileSyste. + * In this test, we are using WebHDFS as source and DFS as target + */ + @Test + public void testSyncSnapshotDiffWithWebHdfs2() throws Exception { + Path webHdfsSource = new Path(webfs.getUri().toString(), source); + Path dfsTarget = new Path(dfs.getUri().toString(), target); + + snapshotDiffWithPaths(webHdfsSource, dfsTarget); + } + + /** + * Test DistCp ues diff option under (s)WebHDFSFileSyste. 
+ * In this test, we are using WebHDFS for both source and target + */ + @Test + public void testSyncSnapshotDiffWithWebHdfs3() throws Exception { + Path webHdfsSource = new Path(webfs.getUri().toString(), source); + Path webHdfsTarget = new Path(webfs.getUri().toString(), target); + + snapshotDiffWithPaths(webHdfsSource, webHdfsTarget); + } + + private void snapshotDiffWithPaths(Path sourceFSPath, + Path targetFSPath) throws Exception { + + FileSystem sourceFS = sourceFSPath.getFileSystem(conf); + FileSystem targetFS = targetFSPath.getFileSystem(conf); + + // Initialize both source and target file system + initData(sourceFS, sourceFSPath); + initData(targetFS, targetFSPath); + + // create snapshots on both source and target side with the same name + List paths = Arrays.asList(sourceFSPath, targetFSPath); + for (Path path: paths) { + FileSystem fs = path.getFileSystem(conf); + if (fs instanceof DistributedFileSystem) { + ((DistributedFileSystem)fs).allowSnapshot(path); + } else if (fs instanceof WebHdfsFileSystem) { + ((WebHdfsFileSystem)fs).allowSnapshot(path); + } else { + throw new IOException("Unsupported fs: " + fs.getScheme()); + } + fs.createSnapshot(path, "s1"); + } + + // do some modification on source side + changeData(sourceFS, sourceFSPath); + + // create a new snapshot on source side + sourceFS.createSnapshot(sourceFSPath, "s2"); + + //try to copy the difference + final DistCpOptions options = new DistCpOptions.Builder( + Collections.singletonList(sourceFSPath), targetFSPath) + .withUseDiff("s1", "s2") + .withSyncFolder(true) + .build(); + options.appendToConf(conf); + + new DistCp(conf, options).execute(); + + verifyCopyByFs(sourceFS, targetFS, sourceFS.getFileStatus(sourceFSPath), + targetFS.getFileStatus(targetFSPath), false); + } } From 4bdaa8ed968879910bae37e424bdce4a3b8e4560 Mon Sep 17 00:00:00 2001 From: Prabhu Joseph Date: Thu, 17 Jun 2021 11:35:41 +0530 Subject: [PATCH 447/666] YARN-10820. Make GetClusterNodesRequestPBImpl thread safe. Contributed by Swathi Chandrashekar. --- .../impl/pb/GetClusterNodesRequestPBImpl.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java index c817f05045354..c25fbdfb445b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java @@ -52,7 +52,7 @@ public GetClusterNodesRequestPBImpl(GetClusterNodesRequestProto proto) { viaProto = true; } - public GetClusterNodesRequestProto getProto() { + public synchronized GetClusterNodesRequestProto getProto() { mergeLocalToProto(); proto = viaProto ? 
proto : builder.build(); viaProto = true; @@ -60,13 +60,13 @@ public GetClusterNodesRequestProto getProto() { } @Override - public EnumSet getNodeStates() { + public synchronized EnumSet getNodeStates() { initNodeStates(); return this.states; } @Override - public void setNodeStates(final EnumSet states) { + public synchronized void setNodeStates(final EnumSet states) { initNodeStates(); this.states.clear(); if (states == null) { From 08621d58cfd2271848f8da7841842175fba1ada0 Mon Sep 17 00:00:00 2001 From: Jim Brennan Date: Mon, 28 Jun 2021 14:52:19 +0000 Subject: [PATCH 448/666] YARN-10834. Intra-queue preemption: apps that don't use defined custom resource won't be preempted. Contributed by Eric Payne. --- .../FifoIntraQueuePreemptionPlugin.java | 5 +- ...cityPreemptionPolicyIntraQueueWithDRF.java | 83 +++++++++++++++++++ 2 files changed, 86 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java index 7c3abb49254ff..a9d3c058e6569 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java @@ -288,8 +288,9 @@ private TreeSet calculateIdealAssignedResourcePerApp( // Once unallocated resource is 0, we can stop assigning ideal per app. 
if (Resources.lessThanOrEqual(rc, clusterResource, - queueReassignableResource, Resources.none()) || rc - .isAnyMajorResourceZeroOrNegative(queueReassignableResource)) { + queueReassignableResource, Resources.none()) || + (rc.isAnyMajorResourceZeroOrNegative(queueReassignableResource) + && context.getInQueuePreemptionConservativeDRF())) { continue; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java index ee88d3161b2e8..b3ad5ada0b322 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java @@ -18,11 +18,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Before; import org.junit.Test; @@ -189,6 +192,86 @@ public void testIntraQueuePreemptionFairOrderingWithStrictAndRelaxedDRF() getAppAttemptId(1)))); } + @SuppressWarnings("unchecked") + @Test + public void testIntraQueuePreemptionFairOrdering3ResourcesWithStrictAndRelaxedDRF() + throws IOException { + /** + * Continue to allow intra-queue preemption when only one of the user's + * resources is above the user limit. + * Queue structure is: + * + *

    +     *       root
    +     *     /  |
    +     *    a   b
    +     * 
    + * + * Guaranteed resource of a and b are 30720:300 and 30720:300 Total cluster + * resource = 61440:600. + * Scenario: Queue B has one running app using 61720:60 resources with no + * pending resources, and one app with no used resources and 30720:30 + * pending resources. + * + * The first part of the test is to show what happens when the conservative + * DRF property is set. Since the memory is above and the vcores is below + * the user limit, only the minimum number of containers is allowed. + * In the second part, since conservative DRF is relaxed, all containers + * needed are allowed to be preempted (minus the AM size). + */ + + conf.set(CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ORDER_POLICY, + "userlimit_first"); + conf.set(CapacitySchedulerConfiguration.PREFIX + + "root.b." + CapacitySchedulerConfiguration.ORDERING_POLICY, "fair"); + conf.setBoolean( + CapacitySchedulerConfiguration.IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + true); + String RESOURCE_1 = "res1"; + riMap.put(RESOURCE_1, ResourceInformation + .newInstance(RESOURCE_1, "", 0, ResourceTypes.COUNTABLE, 0, + Integer.MAX_VALUE)); + + ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); + + String labelsConfig = "=61440:600,true;"; + String nodesConfig = // n1 has no label + "n1= res=61440:600"; + String queuesConfig = + // guaranteed,max,used,pending,reserved + "root(=[61440:600 61440:600 61440:600 30720:30 0]);" + // root + "-a(=[30720:300 61440:600 0:0 0:0 0]);" + // a + "-b(=[30720:300 61440:600 61440:60 30720:30 0]);"; // b + + String appsConfig = + "b\t" + "(1,1024:1,n1,,60,false,0:0,user1);" + // app1 in b + "b\t" + "(1,0:0,n1,,0,false,30720:30,user3);"; // app2 in b + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + Resource ul = Resource.newInstance(30720, 300); + when(((LeafQueue)(cs.getQueue("root.b"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(0)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + reset(eventHandler); + + conf.setBoolean( + CapacitySchedulerConfiguration.IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + false); + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + when(((LeafQueue)(cs.getQueue("root.b"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + verify(eventHandler, times(29)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + } + @Test public void testIntraQueuePreemptionWithDominantVCoreResource() throws IOException { From a05a50f4e7df4e9b77ead1e57b264e1275af8b26 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 29 Jun 2021 15:01:02 +0530 Subject: [PATCH 449/666] HDFS-16092. 
Avoid creating LayoutFlags redundant objects (#3150) --- .../org/apache/hadoop/hdfs/protocol/LayoutFlags.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java index 1d14973cfed93..1cf12a0411c2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java @@ -31,14 +31,15 @@ */ @InterfaceAudience.Private public class LayoutFlags { + /** - * Load a LayoutFlags object from a stream. + * Read next int from given input stream. If the value is not 0 (unsupported + * feature flags), throw appropriate IOException. * * @param in The stream to read from. - * @throws IOException + * @throws IOException If next byte read from given stream is not 0. */ - public static LayoutFlags read(DataInputStream in) - throws IOException { + public static void read(DataInputStream in) throws IOException { int length = in.readInt(); if (length < 0) { throw new IOException("The length of the feature flag section " + @@ -47,7 +48,6 @@ public static LayoutFlags read(DataInputStream in) throw new IOException("Found feature flags which we can't handle. " + "Please upgrade your software."); } - return new LayoutFlags(); } private LayoutFlags() { From 13584cae232e774d40b0515b90400c9813cb22d8 Mon Sep 17 00:00:00 2001 From: litao Date: Tue, 29 Jun 2021 18:15:12 +0800 Subject: [PATCH 450/666] HDFS-16089. Add metric EcReconstructionValidateTimeMillis for StripedBlockReconstructor (#3146) --- .../datanode/erasurecode/StripedBlockReconstructor.java | 7 +++++++ .../hdfs/server/datanode/metrics/DataNodeMetrics.java | 6 ++++++ .../hdfs/server/namenode/TestReconstructStripedBlocks.java | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java index cd59f515283d6..3ead793542c7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java @@ -135,9 +135,16 @@ private void reconstructTargets(int toReconstructLen) throws IOException { resetBuffers(inputs); DataNodeFaultInjector.get().badDecoding(outputs); + long start = Time.monotonicNow(); try { getValidator().validate(inputs, erasedIndices, outputs); + long validateEnd = Time.monotonicNow(); + getDatanode().getMetrics().incrECReconstructionValidateTime( + validateEnd - start); } catch (InvalidDecodingException e) { + long validateFailedEnd = Time.monotonicNow(); + getDatanode().getMetrics().incrECReconstructionValidateTime( + validateFailedEnd - start); getDatanode().getMetrics().incrECInvalidReconstructionTasks(); throw e; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index 2debc3ee0a7e6..5203d7bf87f89 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -168,6 +168,8 @@ public class DataNodeMetrics { private MutableCounterLong ecReconstructionDecodingTimeMillis; @Metric("Milliseconds spent on write by erasure coding worker") private MutableCounterLong ecReconstructionWriteTimeMillis; + @Metric("Milliseconds spent on validating by erasure coding worker") + private MutableCounterLong ecReconstructionValidateTimeMillis; @Metric("Sum of all BPServiceActors command queue length") private MutableCounterLong sumOfActorCommandQueueLength; @Metric("Num of processed commands of all BPServiceActors") @@ -629,6 +631,10 @@ public void incrECReconstructionDecodingTime(long millis) { ecReconstructionDecodingTimeMillis.incr(millis); } + public void incrECReconstructionValidateTime(long millis) { + ecReconstructionValidateTimeMillis.incr(millis); + } + public DataNodeUsageReport getDNUsageReport(long timeSinceLastReport) { return dnUsageReportUtil.getUsageReport(bytesWritten.value(), bytesRead .value(), totalWriteTime.value(), totalReadTime.value(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java index 94a9ae1a4e0f0..fc307bf84d86f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java @@ -364,7 +364,7 @@ public void testCountLiveReplicas() throws Exception { } } - @Test(timeout=120000) // 1 min timeout + @Test(timeout=120000) // 2 min timeout public void testReconstructionWork() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0); From de37111204f5b81d0c6e614aaea90a669987b193 Mon Sep 17 00:00:00 2001 From: Wanqiang Ji Date: Wed, 30 Jun 2021 09:32:16 +0800 Subject: [PATCH 451/666] HADOOP-17777. Update clover-maven-plugin version from 3.3.0 to 4.4.1 (#3152) --- BUILDING.txt | 2 +- hadoop-maven-plugins/pom.xml | 4 ++-- pom.xml | 14 ++++++-------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 4d18f698b9543..78c611fb7d604 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -143,7 +143,7 @@ Maven build goals: * Run checkstyle : mvn compile checkstyle:checkstyle * Install JAR in M2 cache : mvn install * Deploy JAR to Maven repo : mvn deploy - * Run clover : mvn test -Pclover [-DcloverLicenseLocation=${user.name}/.clover.license] + * Run clover : mvn test -Pclover * Run Rat : mvn apache-rat:check * Build javadocs : mvn javadoc:javadoc * Build distribution : mvn package [-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar][-Preleasedocs][-Pyarn-ui] diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml index 116565d780aae..a5473258c35f6 100644 --- a/hadoop-maven-plugins/pom.xml +++ b/hadoop-maven-plugins/pom.xml @@ -126,8 +126,8 @@ classes during plugin execution when running a build with Clover enabled. 
--> - com.atlassian.maven.plugins - maven-clover2-plugin + org.openclover + clover-maven-plugin true diff --git a/pom.xml b/pom.xml index 40b94202bdfee..7ec3ade62ab51 100644 --- a/pom.xml +++ b/pom.xml @@ -108,7 +108,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 3.0.1 0.12 2.4 - 3.3.0 + 4.4.1 2.5.0 1.0.0 3.1.0 @@ -350,9 +350,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x - com.atlassian.maven.plugins - maven-clover2-plugin - ${maven-clover2-plugin.version} + org.openclover + clover-maven-plugin + ${clover-maven-plugin.version} org.apache.felix @@ -679,7 +679,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
    - ${user.home}/.clover.license ${project.build.directory}/clover/hadoop-coverage.db true true - 9.4.40.v20210413 + 9.4.43.v20210629 _ _ From d914c1e958a13ac9340d29aa45b5dc6a5fb7c8b8 Mon Sep 17 00:00:00 2001 From: zhuqi-lucas <821684824@qq.com> Date: Thu, 22 Jul 2021 20:30:43 +0800 Subject: [PATCH 501/666] YARN-10657. We should make max application per queue to support node label. Contributed by Andras Gyori. --- .../scheduler/capacity/AbstractCSQueue.java | 46 ++++++++++--------- .../CapacitySchedulerQueueManager.java | 5 ++ .../scheduler/capacity/LeafQueue.java | 9 +--- ...estCapacitySchedulerAutoQueueCreation.java | 10 ++-- .../scheduler/capacity/TestLeafQueue.java | 39 ++++++++++++++++ .../src/site/markdown/CapacityScheduler.md | 2 +- 6 files changed, 77 insertions(+), 34 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 2f6ca5a15970c..b05fb50462465 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -1528,21 +1528,30 @@ private Resource getMinResourceNormalized(String name, } void updateMaxAppRelatedField(CapacitySchedulerConfiguration conf, - LeafQueue leafQueue, String label) { + LeafQueue leafQueue) { int maxApplications = conf.getMaximumApplicationsPerQueue(queuePath); + int maxGlobalPerQueueApps = conf.getGlobalMaximumApplicationsPerQueue(); + String maxLabel = RMNodeLabelsManager.NO_LABEL; + if (maxApplications < 0) { - int maxGlobalPerQueueApps = conf.getGlobalMaximumApplicationsPerQueue(); - if (maxGlobalPerQueueApps > 0) { - // In absolute mode, should - // shrink when change to corresponding label capacity. - maxApplications = this.capacityConfigType - != CapacityConfigType.ABSOLUTE_RESOURCE ? - maxGlobalPerQueueApps : - (int) (maxGlobalPerQueueApps * queueCapacities - .getAbsoluteCapacity(label)); - } else{ - maxApplications = (int) (conf.getMaximumSystemApplications() - * queueCapacities.getAbsoluteCapacity(label)); + for (String label : configuredNodeLabels) { + int maxApplicationsByLabel = 0; + if (maxGlobalPerQueueApps > 0) { + // In absolute mode, should + // shrink when change to corresponding label capacity. + maxApplicationsByLabel = this.capacityConfigType + != CapacityConfigType.ABSOLUTE_RESOURCE ? 
+ maxGlobalPerQueueApps : + (int) (maxGlobalPerQueueApps * queueCapacities + .getAbsoluteCapacity(label)); + } else { + maxApplicationsByLabel = (int) (conf.getMaximumSystemApplications() + * queueCapacities.getAbsoluteCapacity(label)); + } + if (maxApplicationsByLabel > maxApplications) { + maxApplications = maxApplicationsByLabel; + maxLabel = label; + } } } leafQueue.setMaxApplications(maxApplications); @@ -1560,9 +1569,9 @@ void updateMaxAppRelatedField(CapacitySchedulerConfiguration conf, "update max app related, maxApplications=" + maxApplications + ", maxApplicationsPerUser=" + maxApplicationsPerUser + ", Abs Cap:" + queueCapacities - .getAbsoluteCapacity(label) + ", Cap: " + queueCapacities - .getCapacity(label) + ", MaxCap : " + queueCapacities - .getMaximumCapacity(label)); + .getAbsoluteCapacity(maxLabel) + ", Cap: " + queueCapacities + .getCapacity(maxLabel) + ", MaxCap : " + queueCapacities + .getMaximumCapacity(maxLabel)); } private void deriveCapacityFromAbsoluteConfigurations(String label, @@ -1643,11 +1652,6 @@ void updateEffectiveResources(Resource clusterResource) { deriveCapacityFromAbsoluteConfigurations(label, clusterResource, rc); // Re-visit max applications for a queue based on absolute capacity if // needed. - if (this instanceof LeafQueue) { - LeafQueue leafQueue = (LeafQueue) this; - CapacitySchedulerConfiguration conf = csContext.getConfiguration(); - updateMaxAppRelatedField(conf, leafQueue, label); - } } else{ queueResourceQuotas.setEffectiveMinResource(label, Resources .multiply(resourceByLabel, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index e5b41cede3ae2..a26fadf4342ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -632,6 +632,11 @@ public ConfiguredNodeLabels getConfiguredNodeLabels() { return configuredNodeLabels; } + @VisibleForTesting + public void reinitConfiguredNodeLabels(CapacitySchedulerConfiguration conf) { + this.configuredNodeLabels = new ConfiguredNodeLabels(conf); + } + private LeafQueue createAutoQueue(ApplicationPlacementContext queue) throws SchedulerDynamicEditException { List parentsToCreate = determineMissingParents(queue); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 19de6db21b721..44727fb1db168 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1945,14 +1945,9 @@ public void updateClusterResource(Resource clusterResource, updateAbsoluteCapacities(); - // If maxApplications not set, use the system total max app, apply newly - // calculated abs capacity of the queue. - // When add new queue, the parent queue's other children should also - // update the max app. - super.updateMaxAppRelatedField(csContext.getConfiguration(), - this, CommonNodeLabelsManager.NO_LABEL); - super.updateEffectiveResources(clusterResource); + super.updateMaxAppRelatedField(csContext.getConfiguration(), + this); updateCurrentResourceLimits(currentResourceLimits, clusterResource); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java index d5c7bc58ca84e..c784b19865ed6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java @@ -141,7 +141,7 @@ public void testAutoCreateLeafQueueCreation() throws Exception { validateInitialQueueEntitlement(parentQueue, USER0, expectedChildQueueAbsCapacity, accessibleNodeLabelsOnC); - validateUserAndAppLimits(autoCreatedLeafQueue, 1000, 1000); + validateUserAndAppLimits(autoCreatedLeafQueue, 4000, 4000); validateContainerLimits(autoCreatedLeafQueue); assertTrue(autoCreatedLeafQueue @@ -911,7 +911,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() AutoCreatedLeafQueue user0Queue = (AutoCreatedLeafQueue) newCS.getQueue( USER1); validateCapacities(user0Queue, 0.5f, 0.15f, 1.0f, 0.5f); - validateUserAndAppLimits(user0Queue, 1500, 1500); + validateUserAndAppLimits(user0Queue, 4000, 4000); //update leaf queue template capacities conf.setAutoCreatedLeafQueueConfigCapacity(C, 30f); @@ -919,7 +919,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() newCS.reinitialize(conf, newMockRM.getRMContext()); validateCapacities(user0Queue, 0.3f, 0.09f, 0.4f, 0.2f); - validateUserAndAppLimits(user0Queue, 900, 900); + validateUserAndAppLimits(user0Queue, 4000, 4000); //submit app1 as USER3 submitApp(newMockRM, parentQueue, USER3, USER3, 3, 1); @@ -927,7 +927,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() (AutoCreatedLeafQueue) newCS.getQueue(USER1); validateCapacities(user3Queue, 0.3f, 0.09f, 0.4f,0.2f); - validateUserAndAppLimits(user3Queue, 900, 900); + validateUserAndAppLimits(user3Queue, 4000, 4000); //submit app1 as USER1 - is already activated. 
there should be no diff // in capacities @@ -935,7 +935,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() validateCapacities(user3Queue, 0.3f, 0.09f, 0.4f,0.2f); - validateUserAndAppLimits(user3Queue, 900, 900); + validateUserAndAppLimits(user3Queue, 4000, 4000); validateContainerLimits(user3Queue); GuaranteedOrZeroCapacityOverTimePolicy autoCreatedQueueManagementPolicy = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index b6cc132da533c..7a310138da8e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -5120,6 +5120,45 @@ private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { return queue; } + @Test + public void testMaxApplicationsWithNodeLabels() throws IOException { + CapacitySchedulerConfiguration conf = csConf; + String rootChild = root.getChildQueues().get(0).getQueuePath(); + + conf.setCapacityByLabel(ROOT, "test", 100); + conf.setCapacityByLabel(rootChild, "test", 100); + conf.setCapacityByLabel(rootChild + "." + A, "test", 20); + conf.setCapacityByLabel(rootChild + "." + B, "test", 40); + conf.setCapacityByLabel(rootChild + "." + C, "test", 10); + conf.setCapacityByLabel(rootChild + "." + C + "." + C1, "test", 100); + conf.setCapacityByLabel(rootChild + "." + D, "test", 30); + conf.setCapacityByLabel(rootChild + "." + E, "test", 0); + cs.getCapacitySchedulerQueueManager().reinitConfiguredNodeLabels(conf); + cs.setMaxRunningAppsEnforcer(new CSMaxRunningAppsEnforcer(cs)); + cs.reinitialize(conf, cs.getRMContext()); + + LeafQueue e = (LeafQueue) cs.getQueue("e"); + // Maximum application should be calculated with the default node label + Assert.assertEquals("Maximum application is not calculated properly", + (int)(conf.getMaximumSystemApplications() + * e.getAbsoluteCapacity()), e.getMaxApplications()); + + conf.setCapacityByLabel(rootChild + "." + A, "test", 10); + conf.setCapacityByLabel(rootChild + "." + B, "test", 10); + conf.setCapacityByLabel(rootChild + "." + C, "test", 10); + conf.setCapacityByLabel(rootChild + "." + D, "test", 10); + conf.setCapacityByLabel(rootChild + "." 
+ E, "test", 60); + cs.reinitialize(conf, cs.getRMContext()); + + e = (LeafQueue) cs.getQueue("e"); + // Maximum application is now determined by test label, because that would + // yield a higher value than with default node label + Assert.assertEquals("Maximum application is not calculated properly", + (int)(conf.getMaximumSystemApplications() * + e.getQueueCapacities().getAbsoluteCapacity("test")), + e.getMaxApplications()); + } + @After public void tearDown() throws Exception { if (cs != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index 4d45e7b581cf8..ebec79fe9f84d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -141,7 +141,7 @@ Configuration | Property | Description | |:---- |:---- | -| `yarn.scheduler.capacity.maximum-applications` / `yarn.scheduler.capacity..maximum-applications` | Maximum number of applications in the system which can be concurrently active both running and pending. Limits on each queue are directly proportional to their queue capacities and user limits. This is a hard limit and any applications submitted when this limit is reached will be rejected. Default is 10000. This can be set for all queues with `yarn.scheduler.capacity.maximum-applications` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-applications`. Integer value expected. | +| `yarn.scheduler.capacity.maximum-applications` / `yarn.scheduler.capacity..maximum-applications` | Maximum number of applications in the system which can be concurrently active both running and pending. Limits on each queue are directly proportional to their queue capacities and user limits. This is a hard limit and any applications submitted when this limit is reached will be rejected. Default is 10000. This can be set for all queues with `yarn.scheduler.capacity.maximum-applications` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-applications`. When this property is not set for a specific queue path, the maximum application number is calculated by taking all configured node labels into consideration, and choosing the highest possible value. Integer value expected. | | `yarn.scheduler.capacity.maximum-am-resource-percent` / `yarn.scheduler.capacity..maximum-am-resource-percent` | Maximum percent of resources in the cluster which can be used to run application masters - controls number of concurrent active applications. Limits on each queue are directly proportional to their queue capacities and user limits. Specified as a float - ie 0.5 = 50%. Default is 10%. This can be set for all queues with `yarn.scheduler.capacity.maximum-am-resource-percent` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-am-resource-percent` | | `yarn.scheduler.capacity.max-parallel-apps` / `yarn.scheduler.capacity..max-parallel-apps` | Maximum number of applications that can run at the same time. Unlike to `maximum-applications`, application submissions are *not* rejected when this limit is reached. Instead they stay in `ACCEPTED` state until they are eligible to run. 
This can be set for all queues with `yarn.scheduler.capacity.max-parallel-apps` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..max-parallel-apps`. Integer value is expected. By default, there is no limit. | From e31e4e454971dfcd2db0170e22ebde7113020686 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 22 Jul 2021 18:42:55 +0530 Subject: [PATCH 502/666] HADOOP-17813. Checkstyle - Allow line length: 100 Signed-off-by: Sean Busbey --- .../src/main/resources/checkstyle/checkstyle.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml index c09eb953d6396..7ca0123ee5570 100644 --- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml +++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml @@ -69,7 +69,9 @@ - + + + From d2ce8cb90e4dae7a8870d7bb27ed561a28180058 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 23 Jul 2021 10:08:55 +0530 Subject: [PATCH 503/666] HADOOP-17808. ipc.Client to set interrupt flag after catching InterruptedException (#3219) --- .../src/main/java/org/apache/hadoop/ipc/Client.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index abdc81bb87ad5..212ab3695c576 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -1051,7 +1051,10 @@ private synchronized boolean waitForWork() { if (timeout>0) { try { wait(timeout); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + LOG.info("Interrupted while waiting to retrieve RPC response.", e); + Thread.currentThread().interrupt(); + } } } @@ -1383,6 +1386,9 @@ public void stop() { try { emptyCondition.wait(); } catch (InterruptedException e) { + LOG.info("Interrupted while waiting on all connections to be closed.", + e); + Thread.currentThread().interrupt(); } } } From 0e55933a812498a66c4caa9f40555fb5527d6bdc Mon Sep 17 00:00:00 2001 From: Akshat Bordia <31816865+akshatb1@users.noreply.github.com> Date: Fri, 23 Jul 2021 17:07:51 +0530 Subject: [PATCH 504/666] YARN-10829. Support getApplications API in FederationClientInterceptor (#3135) YARN-10829. 
Support getApplications API in FederationClientInterceptor (#3135) --- .../GetApplicationsResponse.java | 12 ++ .../hadoop/yarn/conf/YarnConfiguration.java | 9 ++ .../conf/TestYarnConfigurationFields.java | 2 + .../clientrm/FederationClientInterceptor.java | 48 +++++- .../clientrm/RouterYarnClientUtils.java | 137 ++++++++++++++++++ .../TestFederationClientInterceptor.java | 118 ++++++++++++++- .../clientrm/TestRouterYarnClientUtils.java | 117 +++++++++++++++ 7 files changed, 438 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsResponse.java index 45e8fdf097115..dd411daec1fe5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsResponse.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import java.util.ArrayList; +import java.util.Collection; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -52,6 +54,16 @@ public static GetApplicationsResponse newInstance( return response; } + @Private + @Unstable + public static GetApplicationsResponse newInstance( + Collection applications) { + GetApplicationsResponse response = + Records.newRecord(GetApplicationsResponse.class); + response.setApplicationList(new ArrayList<>(applications)); + return response; + } + /** * Get ApplicationReport for applications. * @return ApplicationReport for applications diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 2ebf79cfae3f3..d3cec1402641b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3991,6 +3991,15 @@ public static boolean isAclEnabled(Configuration conf) { ROUTER_PREFIX + "submit.retry"; public static final int DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY = 3; + /** + * The interceptor class used in FederationClientInterceptor should return + * partial ApplicationReports. 
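For illustration only (this sketch is not part of the patch): a deployment that wants the Router to keep such partial reports can flip the flag defined just below, which defaults to false. The classes are the stock org.apache.hadoop.conf.Configuration and org.apache.hadoop.yarn.conf.YarnConfiguration.

    // Hedged sketch: enable partial GetApplications results on the Router.
    Configuration conf = new Configuration();
    conf.setBoolean(
        YarnConfiguration.ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED, true);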
+ */ + public static final String ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED = + ROUTER_PREFIX + "partial-result.enabled"; + public static final boolean DEFAULT_ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED = + false; + public static final String ROUTER_WEBAPP_PREFIX = ROUTER_PREFIX + "webapp."; public static final String ROUTER_USER_CLIENT_THREADS_SIZE = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index 3dcd5cc3bed60..84e4b561e5d16 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -184,6 +184,8 @@ public void initializeMemberVariables() { configurationPrefixToSkipCompare .add(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY); + configurationPrefixToSkipCompare + .add(YarnConfiguration.ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED); configurationPrefixToSkipCompare .add(YarnConfiguration.ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED); configurationPrefixToSkipCompare diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index 7e8e7af3c7ae3..08636bbc10b06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Random; @@ -161,6 +162,7 @@ public class FederationClientInterceptor private RouterMetrics routerMetrics; private ThreadPoolExecutor executorService; private final Clock clock = new MonotonicClock(); + private boolean returnPartialReport; @Override public void init(String userName) { @@ -196,6 +198,10 @@ public void init(String userName) { clientRMProxies = new ConcurrentHashMap(); routerMetrics = RouterMetrics.getMetrics(); + + returnPartialReport = conf.getBoolean( + YarnConfiguration.ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED, + YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED); } @Override @@ -599,10 +605,44 @@ public GetApplicationReportResponse getApplicationReport( return response; } + /** + * The Yarn Router will forward the request to all the Yarn RMs in parallel, + * after that it will group all the ApplicationReports by the ApplicationId. + * + * Possible failure: + * + * Client: identical behavior as {@code ClientRMService}. + * + * Router: the Client will timeout and resubmit the request. + * + * ResourceManager: the Router calls each Yarn RM in parallel. In case a + * Yarn RM fails, a single call will timeout. However the Router will + * merge the ApplicationReports it got, and provides a partial list to + * the client. 
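To make that concrete for callers, an illustrative sketch that is not part of the patch, assuming the standard org.apache.hadoop.yarn.client.api.YarnClient pointed at the Router's client address: the call either returns a possibly partial list or fails the same way it would against a single ResourceManager.

    // Hedged sketch of the client-side view; 'conf' is assumed to carry the
    // Router address, so the request is served by FederationClientInterceptor.
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    List<ApplicationReport> apps = client.getApplications();
    // apps may be a partial, merged view if some Yarn RMs did not answer.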
+ * + * State Store: the Router will timeout and it will retry depending on the + * FederationFacade settings - if the failure happened before the select + * operation. + */ @Override public GetApplicationsResponse getApplications(GetApplicationsRequest request) throws YarnException, IOException { - throw new NotImplementedException("Code is not implemented"); + if (request == null) { + RouterServerUtil.logAndThrowException( + "Missing getApplications request.", + null); + } + Map subclusters = + federationFacade.getSubClusters(true); + ClientMethod remoteMethod = new ClientMethod("getApplications", + new Class[] {GetApplicationsRequest.class}, new Object[] {request}); + Map applications = + invokeConcurrent(subclusters.keySet(), remoteMethod, + GetApplicationsResponse.class); + + // Merge the Application Reports + return RouterYarnClientUtils.mergeApplications(applications.values(), + returnPartialReport); } @Override @@ -676,6 +716,12 @@ public Object call() throws Exception { return results; } + Map invokeConcurrent(Collection clusterIds, + ClientMethod request, Class clazz) throws YarnException, IOException { + ArrayList clusterIdList = new ArrayList<>(clusterIds); + return invokeConcurrent(clusterIdList, request, clazz); + } + @Override public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) throws YarnException, IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterYarnClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterYarnClientUtils.java index 50abcf40a8088..9c36f30952a9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterYarnClientUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterYarnClientUtils.java @@ -18,14 +18,25 @@ package org.apache.hadoop.yarn.server.router.clientrm; import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; +import org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager; +import org.apache.hadoop.yarn.util.resource.Resources; /** * Util class for Router Yarn client API calls. */ public final class RouterYarnClientUtils { + private final static String PARTIAL_REPORT = "Partial Report "; + private RouterYarnClientUtils() { } @@ -52,4 +63,130 @@ public static GetClusterMetricsResponse merge( } return GetClusterMetricsResponse.newInstance(tmp); } + + /** + * Merges a list of ApplicationReports grouping by ApplicationId. + * Our current policy is to merge the application reports from the reachable + * SubClusters. 
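A short usage sketch mirroring the tests added later in this patch (the two response variables are hypothetical placeholders for what invokeConcurrent collects):

    // Merge one GetApplicationsResponse per reachable SubCluster.
    List<GetApplicationsResponse> responses = new ArrayList<>();
    responses.add(responseFromSubCluster1);  // hypothetical placeholder
    responses.add(responseFromSubCluster2);  // hypothetical placeholder
    GetApplicationsResponse merged =
        RouterYarnClientUtils.mergeApplications(responses, false);
    // With returnPartialResult=false, entries backed only by unmanaged AMs
    // (UAM/"Partial Report" names) are dropped; pass true to keep them.
    List<ApplicationReport> reports = merged.getApplicationList();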
+ * @param responses a list of ApplicationResponse to merge + * @param returnPartialResult if the merge ApplicationReports should contain + * partial result or not + * @return the merged ApplicationsResponse + */ + public static GetApplicationsResponse mergeApplications( + Collection responses, + boolean returnPartialResult){ + Map federationAM = new HashMap<>(); + Map federationUAMSum = new HashMap<>(); + + for (GetApplicationsResponse appResponse : responses){ + for (ApplicationReport appReport : appResponse.getApplicationList()){ + ApplicationId appId = appReport.getApplicationId(); + // Check if this ApplicationReport is an AM + if (!appReport.isUnmanagedApp()) { + // Insert in the list of AM + federationAM.put(appId, appReport); + // Check if there are any UAM found before + if (federationUAMSum.containsKey(appId)) { + // Merge the current AM with the found UAM + mergeAMWithUAM(appReport, federationUAMSum.get(appId)); + // Remove the sum of the UAMs + federationUAMSum.remove(appId); + } + // This ApplicationReport is an UAM + } else if (federationAM.containsKey(appId)) { + // Merge the current UAM with its own AM + mergeAMWithUAM(federationAM.get(appId), appReport); + } else if (federationUAMSum.containsKey(appId)) { + // Merge the current UAM with its own UAM and update the list of UAM + ApplicationReport mergedUAMReport = + mergeUAMWithUAM(federationUAMSum.get(appId), appReport); + federationUAMSum.put(appId, mergedUAMReport); + } else { + // Insert in the list of UAM + federationUAMSum.put(appId, appReport); + } + } + } + // Check the remaining UAMs are depending or not from federation + for (ApplicationReport appReport : federationUAMSum.values()) { + if (mergeUamToReport(appReport.getName(), returnPartialResult)) { + federationAM.put(appReport.getApplicationId(), appReport); + } + } + + return GetApplicationsResponse.newInstance(federationAM.values()); + } + + private static ApplicationReport mergeUAMWithUAM(ApplicationReport uam1, + ApplicationReport uam2){ + uam1.setName(PARTIAL_REPORT + uam1.getApplicationId()); + mergeAMWithUAM(uam1, uam2); + return uam1; + } + + private static void mergeAMWithUAM(ApplicationReport am, + ApplicationReport uam){ + ApplicationResourceUsageReport amResourceReport = + am.getApplicationResourceUsageReport(); + + ApplicationResourceUsageReport uamResourceReport = + uam.getApplicationResourceUsageReport(); + + amResourceReport.setNumUsedContainers( + amResourceReport.getNumUsedContainers() + + uamResourceReport.getNumUsedContainers()); + + amResourceReport.setNumReservedContainers( + amResourceReport.getNumReservedContainers() + + uamResourceReport.getNumReservedContainers()); + + amResourceReport.setUsedResources(Resources.add( + amResourceReport.getUsedResources(), + uamResourceReport.getUsedResources())); + + amResourceReport.setReservedResources(Resources.add( + amResourceReport.getReservedResources(), + uamResourceReport.getReservedResources())); + + amResourceReport.setNeededResources(Resources.add( + amResourceReport.getNeededResources(), + uamResourceReport.getNeededResources())); + + amResourceReport.setMemorySeconds( + amResourceReport.getMemorySeconds() + + uamResourceReport.getMemorySeconds()); + + amResourceReport.setVcoreSeconds( + amResourceReport.getVcoreSeconds() + + uamResourceReport.getVcoreSeconds()); + + amResourceReport.setQueueUsagePercentage( + amResourceReport.getQueueUsagePercentage() + + uamResourceReport.getQueueUsagePercentage()); + + amResourceReport.setClusterUsagePercentage( + 
amResourceReport.getClusterUsagePercentage() + + uamResourceReport.getClusterUsagePercentage()); + + am.setApplicationResourceUsageReport(amResourceReport); + } + + /** + * Returns whether or not to add an unmanaged application to the report. + * @param appName Application Name + * @param returnPartialResult if the merge ApplicationReports should contain + * partial result or not + */ + private static boolean mergeUamToReport(String appName, + boolean returnPartialResult){ + if (returnPartialResult) { + return true; + } + if (appName == null) { + return false; + } + return !(appName.startsWith(UnmanagedApplicationManager.APP_NAME) || + appName.startsWith(PARTIAL_REPORT)); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java index 125dfcfbeeed9..4ffd3cb2e1df3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java @@ -22,16 +22,20 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; import java.util.List; - import java.util.Map; +import java.util.Set; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.yarn.MockApps; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; @@ -40,11 +44,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager; @@ -531,4 +536,109 @@ public void 
testGetClusterMetricsRequest() throws YarnException, IOException { GetClusterMetricsResponse.class); Assert.assertEquals(true, clusterMetrics.isEmpty()); } + + /** + * This test validates the correctness of + * GetApplicationsResponse in case the + * application exists in the cluster. + */ + @Test + public void testGetApplicationsResponse() + throws YarnException, IOException, InterruptedException { + LOG.info("Test FederationClientInterceptor: Get Applications Response"); + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + SubmitApplicationRequest request = mockSubmitApplicationRequest(appId); + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + Set appTypes = Collections.singleton("MockApp"); + GetApplicationsRequest requestGet = + GetApplicationsRequest.newInstance(appTypes); + + GetApplicationsResponse responseGet = + interceptor.getApplications(requestGet); + + Assert.assertNotNull(responseGet); + } + + /** + * This test validates + * the correctness of GetApplicationsResponse in case of + * empty request. + */ + @Test + public void testGetApplicationsNullRequest() throws Exception { + LOG.info("Test FederationClientInterceptor: Get Applications request"); + LambdaTestUtils.intercept(YarnException.class, + "Missing getApplications request.", + () -> interceptor.getApplications(null)); + } + + /** + * This test validates + * the correctness of GetApplicationsResponse in case applications + * with given type does not exist. + */ + @Test + public void testGetApplicationsApplicationTypeNotExists() throws Exception{ + LOG.info("Test FederationClientInterceptor: Application with type does " + + "not exist"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + SubmitApplicationRequest request = mockSubmitApplicationRequest(appId); + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + Set appTypes = Collections.singleton("SPARK"); + + GetApplicationsRequest requestGet = + GetApplicationsRequest.newInstance(appTypes); + + GetApplicationsResponse responseGet = + interceptor.getApplications(requestGet); + + Assert.assertNotNull(responseGet); + Assert.assertTrue(responseGet.getApplicationList().isEmpty()); + } + + /** + * This test validates + * the correctness of GetApplicationsResponse in case applications + * with given YarnApplicationState does not exist. 
+ */ + @Test + public void testGetApplicationsApplicationStateNotExists() throws Exception{ + LOG.info("Test FederationClientInterceptor:" + + " Application with state does not exist"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + SubmitApplicationRequest request = mockSubmitApplicationRequest(appId); + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + EnumSet applicationStates = EnumSet.noneOf( + YarnApplicationState.class); + applicationStates.add(YarnApplicationState.KILLED); + + GetApplicationsRequest requestGet = + GetApplicationsRequest.newInstance(applicationStates); + + GetApplicationsResponse responseGet = + interceptor.getApplications(requestGet); + + Assert.assertNotNull(responseGet); + Assert.assertTrue(responseGet.getApplicationList().isEmpty()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterYarnClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterYarnClientUtils.java index d062f9d0b590f..3b64c2310768c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterYarnClientUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterYarnClientUtils.java @@ -19,8 +19,19 @@ package org.apache.hadoop.yarn.server.router.clientrm; import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; +import org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager; import org.junit.Assert; import org.junit.Test; @@ -29,6 +40,8 @@ */ public class TestRouterYarnClientUtils { + private final static String PARTIAL_REPORT = "Partial Report "; + @Test public void testClusterMetricsMerge() { ArrayList responses = new ArrayList<>(); @@ -54,4 +67,108 @@ public GetClusterMetricsResponse getClusterMetricsResponse(int value) { metrics.setNumNodeManagers(value); return GetClusterMetricsResponse.newInstance(metrics); } + + /** + * This test validates the correctness of + * RouterYarnClientUtils#mergeApplications. + */ + @Test + public void testMergeApplications() { + ArrayList responses = new ArrayList<>(); + responses.add(getApplicationsResponse(1, false)); + responses.add(getApplicationsResponse(2, false)); + GetApplicationsResponse result = RouterYarnClientUtils. 
+ mergeApplications(responses, false); + Assert.assertNotNull(result); + Assert.assertEquals(2, result.getApplicationList().size()); + + String appName1 = result.getApplicationList().get(0).getName(); + String appName2 = result.getApplicationList().get(1).getName(); + + // Check that no Unmanaged applications are added to the result + Assert.assertEquals(false, + appName1.contains(UnmanagedApplicationManager.APP_NAME)); + Assert.assertEquals(false, + appName2.contains(UnmanagedApplicationManager.APP_NAME)); + } + + /** + * This test validates the correctness of + * RouterYarnClientUtils#mergeApplications. + */ + @Test + public void testMergeUnmanagedApplications() { + ArrayList responses = new ArrayList<>(); + responses.add(getApplicationsResponse(1, true)); + + // Check response if partial results are enabled + GetApplicationsResponse result = RouterYarnClientUtils. + mergeApplications(responses, true); + Assert.assertNotNull(result); + Assert.assertEquals(1, result.getApplicationList().size()); + ApplicationReport appReport = result.getApplicationList().iterator().next(); + String appName = appReport.getName(); + Assert.assertTrue(appName.startsWith(PARTIAL_REPORT)); + + // Check ApplicationResourceUsageReport merge + ApplicationResourceUsageReport resourceUsageReport = + appReport.getApplicationResourceUsageReport(); + + Assert.assertEquals(2, resourceUsageReport.getNumUsedContainers()); + Assert.assertEquals(4, resourceUsageReport.getNumReservedContainers()); + + // Check response if partial results are disabled + result = RouterYarnClientUtils. + mergeApplications(responses, false); + Assert.assertNotNull(result); + Assert.assertTrue(result.getApplicationList().isEmpty()); + } + + /** + * This generates a GetApplicationsResponse with 2 applications with + * same ApplicationId. + * @param value Used as Id in ApplicationId + * @param uamOnly If set to true, only unmanaged applications are added in + * response, else one managed and one unmanaged applications + * are added with same ApplicationId. + * @return GetApplicationsResponse + */ + private GetApplicationsResponse getApplicationsResponse(int value, + boolean uamOnly) { + String appName = uamOnly? UnmanagedApplicationManager.APP_NAME: "appname"; + List applications = new ArrayList<>(); + + // Create first application report. This is a managed app by default. + // If uamOnly is true, this becomes unmanaged application. + ApplicationId appId = ApplicationId.newInstance(1234, value); + Resource resource = Resource.newInstance(1024, 1); + ApplicationResourceUsageReport appResourceUsageReport = + ApplicationResourceUsageReport.newInstance( + 1, 2, resource, resource, + resource, null, 0.1f, + 0.1f, null); + + ApplicationReport appReport = ApplicationReport.newInstance( + appId, ApplicationAttemptId.newInstance(appId, 1), + "user", "queue", appName, "host", + 124, null, YarnApplicationState.RUNNING, + "diagnostics", "url", 0, 0, + 0, FinalApplicationStatus.SUCCEEDED, appResourceUsageReport, "N/A", + 0.53789f, "YARN", null, null, uamOnly, null, null, null); + + // Create second application report. This is always unmanaged application. 
+ ApplicationId appId2 = ApplicationId.newInstance(1234, value); + ApplicationReport appReport2 = ApplicationReport.newInstance( + appId2, ApplicationAttemptId.newInstance(appId, 1), + "user", "queue", UnmanagedApplicationManager.APP_NAME, "host", + 124, null, YarnApplicationState.RUNNING, + "diagnostics", "url", 0, 0, + 0, FinalApplicationStatus.SUCCEEDED, appResourceUsageReport, "N/A", + 0.53789f, "YARN", null, null, true, null, null, null); + + applications.add(appReport); + applications.add(appReport2); + + return GetApplicationsResponse.newInstance(applications); + } } From e7de0ca895cf11356261e77090c032d8f41971ed Mon Sep 17 00:00:00 2001 From: Petre Bogdan Stolojan Date: Fri, 23 Jul 2021 14:44:29 +0100 Subject: [PATCH 505/666] HADOOP-17458. S3A to treat "SdkClientException: Data read has a different length than the expected" as EOFException (#3040) Some network exceptions can raise SdkClientException with message `Data read has a different length than the expected`. These should be recoverable. Contributed by Bogdan Stolojan --- .../org/apache/hadoop/fs/s3a/S3AUtils.java | 16 +++++---- .../org/apache/hadoop/fs/s3a/TestInvoker.java | 36 +++++++++++++++++++ 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index 859f71a181587..a897a0a2bb66c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -132,9 +132,12 @@ public final class S3AUtils { S3AEncryptionMethods.SSE_S3.getMethod() + " is enabled but an encryption key was set in " + SERVER_SIDE_ENCRYPTION_KEY; - private static final String EOF_MESSAGE_IN_XML_PARSER + public static final String EOF_MESSAGE_IN_XML_PARSER = "Failed to sanitize XML document destined for handler class"; + public static final String EOF_READ_DIFFERENT_LENGTH + = "Data read has a different length than the expected"; + private static final String BUCKET_PATTERN = FS_S3A_BUCKET_PREFIX + "%s.%s"; /** @@ -194,7 +197,7 @@ public static IOException translateException(@Nullable String operation, // interrupted IO, or a socket exception underneath that class return translateInterruptedException(exception, innerCause, message); } - if (signifiesConnectionBroken(exception)) { + if (isMessageTranslatableToEOF(exception)) { // call considered an sign of connectivity failure return (EOFException)new EOFException(message).initCause(exception); } @@ -415,13 +418,14 @@ public static boolean isThrottleException(Exception ex) { /** * Cue that an AWS exception is likely to be an EOF Exception based - * on the message coming back from an XML/JSON parser. This is likely - * to be brittle, so only a hint. + * on the message coming back from the client. This is likely to be + * brittle, so only a hint. * @param ex exception * @return true if this is believed to be a sign the connection was broken. 
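The intended effect, sketched against the new constants (illustrative only, mirroring the TestInvoker cases added below; it assumes the (operation, path, exception) overload of translateException, and the path string is a hypothetical example):

    // Both messages should now translate to EOFException, which S3A treats
    // as a recoverable read failure rather than a hard client error.
    SdkBaseException cause =
        new SdkClientException(S3AUtils.EOF_READ_DIFFERENT_LENGTH);
    IOException translated =
        S3AUtils.translateException("read", "s3a://bucket/key", cause);
    // expected: translated instanceof EOFException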
*/ - public static boolean signifiesConnectionBroken(SdkBaseException ex) { - return ex.toString().contains(EOF_MESSAGE_IN_XML_PARSER); + public static boolean isMessageTranslatableToEOF(SdkBaseException ex) { + return ex.toString().contains(EOF_MESSAGE_IN_XML_PARSER) || + ex.toString().contains(EOF_READ_DIFFERENT_LENGTH); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java index 5da665c46b9ce..4f063904127c3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a; +import java.io.EOFException; import java.io.IOException; import java.io.InterruptedIOException; import java.net.SocketTimeoutException; @@ -28,6 +29,7 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.AmazonServiceException; import com.amazonaws.SdkBaseException; +import com.amazonaws.SdkClientException; import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputExceededException; import com.amazonaws.services.s3.model.AmazonS3Exception; import org.junit.Assert; @@ -163,6 +165,40 @@ public void test500isStatus500Exception() throws Exception { ex); } + @Test + public void testExceptionsWithTranslatableMessage() throws Exception { + SdkBaseException xmlParsing = new SdkBaseException(EOF_MESSAGE_IN_XML_PARSER); + SdkBaseException differentLength = new SdkBaseException(EOF_READ_DIFFERENT_LENGTH); + + verifyTranslated(EOFException.class, xmlParsing); + verifyTranslated(EOFException.class, differentLength); + } + + + @Test + public void testSdkDifferentLengthExceptionIsTranslatable() throws Throwable { + final AtomicInteger counter = new AtomicInteger(0); + invoker.retry("test", null, false, () -> { + if (counter.incrementAndGet() < ACTIVE_RETRY_LIMIT) { + throw new SdkClientException(EOF_READ_DIFFERENT_LENGTH); + } + }); + + assertEquals(ACTIVE_RETRY_LIMIT, counter.get()); + } + + @Test + public void testSdkXmlParsingExceptionIsTranslatable() throws Throwable { + final AtomicInteger counter = new AtomicInteger(0); + invoker.retry("test", null, false, () -> { + if (counter.incrementAndGet() < ACTIVE_RETRY_LIMIT) { + throw new SdkClientException(EOF_MESSAGE_IN_XML_PARSER); + } + }); + + assertEquals(ACTIVE_RETRY_LIMIT, counter.get()); + } + @Test(expected = org.apache.hadoop.net.ConnectTimeoutException.class) public void testExtractConnectTimeoutException() throws Throwable { throw extractException("", "", From 727d8e6368e519081b617f0cdb39a49daa7db42f Mon Sep 17 00:00:00 2001 From: Benjamin Teke Date: Sat, 24 Jul 2021 05:44:21 +0200 Subject: [PATCH 506/666] YARN-10833. Set the X-FRAME-OPTIONS header for the default contexts. (#3203) * YARN-10833. Set the X-FRAME-OPTIONS header for the default contexts. * fixup: YARN-10833. Set the X-FRAME-OPTIONS header for the default contexts. 
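For context, a hedged sketch of how the behaviour can be toggled (illustrative only; the constants are the ones the rewritten test below exercises):

    // Explicitly enable the X-FRAME-OPTIONS header for the RM web app and
    // pick its value; when nothing is set, the test asserts the HttpServer2
    // default (SAMEORIGIN) is applied to the default contexts as well.
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.YARN_XFS_ENABLED, true);
    conf.setStrings(YarnConfiguration.RM_XFS_OPTIONS,
        HttpServer2.XFrameOption.SAMEORIGIN.toString());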
Co-authored-by: Benjamin Teke --- .../apache/hadoop/yarn/webapp/WebApps.java | 31 ++-- .../yarn/webapp/TestRMWithXFSFilter.java | 155 +++++++----------- 2 files changed, 76 insertions(+), 110 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 2f02fd7a0762f..a088f4bf39f12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -41,7 +41,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.http.RestCsrfPreventionFilter; -import org.apache.hadoop.security.http.XFrameOptionsFilter; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @@ -325,6 +324,19 @@ public void setup() { YarnConfiguration.YARN_ADMIN_ACL, YarnConfiguration.DEFAULT_YARN_ADMIN_ACL))) .setPathSpec(pathList.toArray(new String[0])); + + // Set the X-FRAME-OPTIONS header, use the HttpServer2 default if + // the header value is not specified + Map xfsParameters = + getConfigParameters(xfsConfigPrefix); + + if (xfsParameters != null) { + String xFrameOptions = xfsParameters.get("xframe-options"); + if (xFrameOptions != null) { + builder.configureXFrame(hasXFSEnabled()) + .setXFrameOption(xFrameOptions); + } + } // Get port ranges from config. IntegerRanges ranges = null; if (portRangeConfigKey != null) { @@ -395,15 +407,6 @@ public void setup() { new String[] {"/*"}); } - params = getConfigParameters(xfsConfigPrefix); - - if (hasXFSEnabled()) { - String xfsClassName = XFrameOptionsFilter.class.getName(); - HttpServer2.defineFilter(server.getWebAppContext(), xfsClassName, - xfsClassName, params, - new String[] {"/*"}); - } - HttpServer2.defineFilter(server.getWebAppContext(), "guice", GuiceFilter.class.getName(), null, new String[] { "/*" }); @@ -489,14 +492,6 @@ private void addFiltersForNewContext(WebAppContext ui2Context) { HttpServer2.defineFilter(ui2Context, restCsrfClassName, restCsrfClassName, params, new String[]{"/*"}); } - - params = getConfigParameters(xfsConfigPrefix); - - if (hasXFSEnabled()) { - String xfsClassName = XFrameOptionsFilter.class.getName(); - HttpServer2.defineFilter(ui2Context, xfsClassName, xfsClassName, params, - new String[]{"/*"}); - } } private String inferHostClass() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java index 2f7ecde2fb719..2054754760d71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java @@ -18,126 +18,97 @@ package org.apache.hadoop.yarn.webapp; -import com.google.inject.Guice; -import com.google.inject.servlet.ServletModule; -import 
com.sun.jersey.api.client.ClientResponse; -import com.sun.jersey.api.client.WebResource; -import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; -import com.sun.jersey.test.framework.WebAppDescriptor; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.http.XFrameOptionsFilter; +import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; -import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver; -import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices; -import org.junit.Before; +import org.junit.After; +import org.junit.Assert; import org.junit.Test; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; /** * Used TestRMWebServices as an example of web invocations of RM and added * test for XFS Filter. */ -public class TestRMWithXFSFilter extends JerseyTestBase { - +public class TestRMWithXFSFilter { private static MockRM rm; - @Before - @Override - public void setUp() throws Exception { - super.setUp(); - } - - public TestRMWithXFSFilter() { - super(new WebAppDescriptor.Builder( - "org.apache.hadoop.yarn.server.resourcemanager.webapp") - .contextListenerClass(GuiceServletConfig.class) - .filterClass(com.google.inject.servlet.GuiceFilter.class) - .contextPath("jersey-guice-filter").servletPath("/").build()); + private void createMockRm(final Boolean xfsEnabled, + final String xfsHeaderValue) { + Configuration conf = new Configuration(); + conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, + ResourceScheduler.class); + conf.setBoolean("mockrm.webapp.enabled", true); + if (xfsEnabled != null) { + conf.setBoolean(YarnConfiguration.YARN_XFS_ENABLED, xfsEnabled); + } + if (xfsHeaderValue != null) { + conf.setStrings(YarnConfiguration.RM_XFS_OPTIONS, xfsHeaderValue); + } + rm = new MockRM(conf); + rm.start(); } @Test - public void testDefaultBehavior() throws Exception { - createInjector(); - - WebResource r = resource(); - ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("info").accept("application/xml") - .get(ClientResponse.class); - assertEquals("Should have received DENY x-frame options header", - "DENY", - response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS).get(0)); - } - - protected void createInjector(String headerValue) { - createInjector(headerValue, false); + public void testXFrameOptionsDefaultBehaviour() throws Exception { + createMockRm(null, null); + + URL url = new URL("http://localhost:8088/ws/v1/cluster/info"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS"); + Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption + .SAMEORIGIN.toString())); } - - protected void createInjector() { - createInjector(null, false); + @Test + public void testXFrameOptionsExplicitlyEnabled() throws Exception { + createMockRm(true, HttpServer2.XFrameOption + .SAMEORIGIN.toString()); + + URL url = new URL("http://localhost:8088/ws/v1/cluster/info"); + HttpURLConnection conn = 
(HttpURLConnection) url.openConnection(); + String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS"); + Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption + .SAMEORIGIN.toString())); } - protected void createInjector(final String headerValue, - final boolean explicitlyDisabled) { - GuiceServletConfig.setInjector(Guice.createInjector(new ServletModule() { - @Override - protected void configureServlets() { - bind(JAXBContextResolver.class); - bind(RMWebServices.class); - bind(GenericExceptionHandler.class); - Configuration conf = new Configuration(); - conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, - ResourceScheduler.class); - rm = new MockRM(conf); - bind(ResourceManager.class).toInstance(rm); - serve("/*").with(GuiceContainer.class); - XFrameOptionsFilter xfsFilter = new XFrameOptionsFilter(); - Map initParams = new HashMap<>(); - if (headerValue != null) { - initParams.put(XFrameOptionsFilter.CUSTOM_HEADER_PARAM, headerValue); - } - if (explicitlyDisabled) { - initParams.put( - "xframe-options-enabled", "false"); - } - - filter("/*").through(xfsFilter, initParams); - } - })); + @Test + public void testXFrameOptionsEnabledDefaultApps() throws Exception { + createMockRm(true, HttpServer2.XFrameOption + .SAMEORIGIN.toString()); + + URL url = new URL("http://localhost:8088/logs"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS"); + Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption + .SAMEORIGIN.toString())); } @Test - public void testSameOrigin() throws Exception { - createInjector("SAMEORIGIN"); + public void testXFrameOptionsDisabled() throws Exception { + createMockRm(false, null); - WebResource r = resource(); - ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("info").accept("application/xml") - .get(ClientResponse.class); - assertEquals("Should have received SAMEORIGIN x-frame options header", - "SAMEORIGIN", - response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS).get(0)); + URL url = new URL("http://localhost:8088/ws/v1/cluster/info"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS"); + Assert.assertNull("Unexpected X-FRAME-OPTION in header", xfoHeader); } @Test - public void testExplicitlyDisabled() throws Exception { - createInjector(null, true); - - WebResource r = resource(); - ClientResponse response = r.path("ws").path("v1").path("cluster") - .path("info").accept("application/xml") - .get(ClientResponse.class); - assertFalse("Should have not received x-frame options header", - response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS) == null); + public void testXFrameOptionsIllegalOption() { + IllegalArgumentException e = Assert.assertThrows( + IllegalArgumentException.class, + () -> createMockRm(true, "otherValue")); } + @After + public void tearDown() throws IOException { + rm.close(); + } } From 72a9c64158c813ea4cb453bb837f20ffcb351242 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Sat, 24 Jul 2021 14:48:25 +0900 Subject: [PATCH 507/666] HADOOP-17317. 
[JDK 11] Upgrade dnsjava to remove illegal access warnings (#2442) Reviewed-by: Masatake Iwasaki --- .../apache/hadoop/security/SecurityUtil.java | 7 +- .../registry/server/dns/RegistryDNS.java | 77 +++++------- .../registry/server/dns/SecureableZone.java | 3 +- .../registry/server/dns/TestRegistryDNS.java | 118 +++++++++--------- hadoop-project/pom.xml | 2 +- 5 files changed, 93 insertions(+), 114 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 3b9e9c53e44f4..59383dfefb29b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -608,11 +608,8 @@ protected static class QualifiedHostResolver implements HostResolver { private List searchDomains = new ArrayList<>(); { ResolverConfig resolverConfig = ResolverConfig.getCurrentConfig(); - Name[] names = resolverConfig.searchPath(); - if (names != null) { - for (Name name : names) { - searchDomains.add(name.toString()); - } + for (Name name : resolverConfig.searchPath()) { + searchDomains.add(name.toString()); } } diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index eeee581540963..8dbe79682e2d5 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -75,7 +75,6 @@ import java.net.Socket; import java.net.SocketAddress; import java.net.SocketException; -import java.net.UnknownHostException; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; import java.nio.channels.DatagramChannel; @@ -87,8 +86,10 @@ import java.security.spec.InvalidKeySpecException; import java.security.spec.RSAPrivateKeySpec; import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; -import java.util.Calendar; import java.util.Collection; import java.util.Date; import java.util.Enumeration; @@ -232,13 +233,7 @@ private void updateDNSServer(Configuration conf) { } catch (SocketException e) { } ResolverConfig.refresh(); - ExtendedResolver resolver; - try { - resolver = new ExtendedResolver(); - } catch (UnknownHostException e) { - LOG.error("Can not resolve DNS servers: ", e); - return; - } + ExtendedResolver resolver = new ExtendedResolver(); for (Resolver check : resolver.getResolvers()) { if (check instanceof SimpleResolver) { InetAddress address = ((SimpleResolver) check).getAddress() @@ -247,7 +242,7 @@ private void updateDNSServer(Configuration conf) { resolver.deleteResolver(check); continue; } else { - check.setTimeout(30); + check.setTimeout(Duration.ofSeconds(30)); } } else { LOG.error("Not simple resolver!!!?" 
+ check); @@ -260,12 +255,10 @@ private void updateDNSServer(Configuration conf) { } StringBuilder message = new StringBuilder(); message.append("DNS servers: "); - if (ResolverConfig.getCurrentConfig().servers() != null) { - for (String server : ResolverConfig.getCurrentConfig() - .servers()) { - message.append(server); - message.append(" "); - } + for (InetSocketAddress address : + ResolverConfig.getCurrentConfig().servers()) { + message.append(address); + message.append(" "); } LOG.info(message.toString()); } @@ -331,11 +324,10 @@ private void signZones() throws IOException { if (isDNSSECEnabled()) { Collection zoneCollection = zones.values(); for (Zone zone : zoneCollection) { - Iterator itor = zone.iterator(); + Iterator itor = zone.iterator(); while (itor.hasNext()) { - RRset rRset = (RRset) itor.next(); - Iterator sigs = rRset.sigs(); - if (!sigs.hasNext()) { + RRset rRset = itor.next(); + if (!rRset.sigs().isEmpty()) { try { signSiteRecord(zone, rRset.first()); } catch (DNSSEC.DNSSECException e) { @@ -692,10 +684,8 @@ private void signSiteRecord(Zone zone, Record record) throws DNSSEC.DNSSECException { RRset rrset = zone.findExactMatch(record.getName(), record.getType()); - Calendar cal = Calendar.getInstance(); - Date inception = cal.getTime(); - cal.add(Calendar.YEAR, 1); - Date expiration = cal.getTime(); + Instant inception = Instant.now(); + Instant expiration = inception.plus(365, ChronoUnit.DAYS); RRSIGRecord rrsigRecord = DNSSEC.sign(rrset, dnsKeyRecs.get(zone.getOrigin()), privateKey, inception, expiration); @@ -1159,7 +1149,7 @@ private byte remoteLookup(Message response, Name name, int type, } } if (r.getType() == Type.CNAME) { - Name cname = ((CNAMERecord) r).getAlias(); + Name cname = r.getName(); if (iterations < 6) { remoteLookup(response, cname, type, iterations + 1); } @@ -1255,9 +1245,7 @@ private int getMaxLength(Socket s, OPTRecord queryOPT) { * @param flags the flags. 
*/ private void addAdditional2(Message response, int section, int flags) { - Record[] records = response.getSectionArray(section); - for (int i = 0; i < records.length; i++) { - Record r = records[i]; + for (Record r : response.getSection(section)) { Name glueName = r.getAdditionalName(); if (glueName != null) { addGlue(response, glueName, flags); @@ -1403,11 +1391,10 @@ byte addAnswer(Message response, Name name, int type, int dclass, response.getHeader().setFlag(Flags.AA); } } else if (sr.isSuccessful()) { - RRset[] rrsets = sr.answers(); + List rrsets = sr.answers(); LOG.info("found answers {}", rrsets); - for (int i = 0; i < rrsets.length; i++) { - addRRset(name, response, rrsets[i], - Section.ANSWER, flags); + for (RRset rrset : rrsets) { + addRRset(name, response, rrset, Section.ANSWER, flags); } addNS(response, zone, flags); if (iterations == 0) { @@ -1456,7 +1443,7 @@ private void addSOA(Message response, Zone zone, int flags) { private void addNXT(Message response, int flags) throws DNSSEC.DNSSECException, IOException { Record nxtRecord = getNXTRecord( - response.getSectionArray(Section.QUESTION)[0]); + response.getSection(Section.QUESTION).get(0)); Zone zone = findBestZone(nxtRecord.getName()); addRecordCommand.exec(zone, nxtRecord); RRset nxtRR = zone.findExactMatch(nxtRecord.getName(), Type.NXT); @@ -1515,9 +1502,7 @@ private void addRRset(Name name, Message response, RRset rrset, int section, } } if ((flags & FLAG_SIGONLY) == 0) { - Iterator it = rrset.rrs(); - while (it.hasNext()) { - Record r = (Record) it.next(); + for (Record r : rrset.rrs()) { if (r.getName().isWild() && !name.isWild()) { r = r.withName(name); } @@ -1525,9 +1510,7 @@ private void addRRset(Name name, Message response, RRset rrset, int section, } } if ((flags & (FLAG_SIGONLY | FLAG_DNSSECOK)) != 0) { - Iterator it = rrset.sigs(); - while (it.hasNext()) { - Record r = (Record) it.next(); + for (Record r : rrset.sigs()) { if (r.getName().isWild() && !name.isWild()) { r = r.withName(name); } @@ -1554,13 +1537,13 @@ byte[] doAXFR(Name name, Message query, TSIG tsig, TSIGRecord qtsig, if (zone == null) { return errorMessage(query, Rcode.REFUSED); } - Iterator it = zone.AXFR(); + Iterator it = zone.AXFR(); try { DataOutputStream dataOut; dataOut = new DataOutputStream(s.getOutputStream()); int id = query.getHeader().getID(); while (it.hasNext()) { - RRset rrset = (RRset) it.next(); + RRset rrset = it.next(); Message response = new Message(id); Header header = response.getHeader(); header.setFlag(Flags.QR); @@ -1568,7 +1551,7 @@ byte[] doAXFR(Name name, Message query, TSIG tsig, TSIGRecord qtsig, addRRset(rrset.getName(), response, rrset, Section.ANSWER, FLAG_DNSSECOK); if (tsig != null) { - tsig.applyStream(response, qtsig, first); + tsig.apply(response, qtsig, first); qtsig = response.getTSIG(); } first = false; @@ -1688,10 +1671,8 @@ public void exec(Zone zone, Record record) throws IOException { zone.addRecord(record); LOG.info("Registered {}", record); if (isDNSSECEnabled()) { - Calendar cal = Calendar.getInstance(); - Date inception = cal.getTime(); - cal.add(Calendar.YEAR, 1); - Date expiration = cal.getTime(); + Instant inception = Instant.now(); + Instant expiration = inception.plus(365, ChronoUnit.DAYS); RRset rRset = zone.findExactMatch(record.getName(), record.getType()); try { @@ -1727,8 +1708,8 @@ public void exec(Zone zone, Record record) throws IOException { */ private void addDSRecord(Zone zone, Name name, int dClass, long dsTtl, - Date inception, - Date expiration) throws DNSSEC.DNSSECException { 
+ Instant inception, + Instant expiration) throws DNSSEC.DNSSECException { RRset rRset; RRSIGRecord rrsigRecord; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java index 4b0a85269d3cd..c2f65321dd3cb 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java @@ -138,8 +138,7 @@ public Record getNXTRecord(Record queryRecord, Zone zone) { SetResponse sr = zone.findRecords(base.getName(), Type.ANY); BitSet bitMap = new BitSet(); bitMap.set(Type.NXT); - RRset[] rRsets = sr.answers(); - for (RRset rRset : rRsets) { + for (RRset rRset : sr.answers()) { int typeCode = rRset.getType(); if (typeCode > 0 && typeCode < 128) { bitMap.set(typeCode); diff --git a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java index a0c4ca3970c5c..56e617144ad38 100644 --- a/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java +++ b/hadoop-common-project/hadoop-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java @@ -51,8 +51,9 @@ import java.security.KeyFactory; import java.security.PrivateKey; import java.security.spec.RSAPrivateKeySpec; -import java.util.Calendar; -import java.util.Date; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.List; import java.util.concurrent.TimeUnit; import static org.apache.hadoop.registry.client.api.RegistryConstants.*; @@ -194,34 +195,37 @@ public void testAppRegistration() throws Exception { "/registry/users/root/services/org-apache-slider/test1/", record); // start assessing whether correct records are available - Record[] recs = assertDNSQuery("test1.root.dev.test."); + List recs = assertDNSQuery("test1.root.dev.test."); assertEquals("wrong result", "192.168.1.5", - ((ARecord) recs[0]).getAddress().getHostAddress()); + ((ARecord) recs.get(0)).getAddress().getHostAddress()); recs = assertDNSQuery("management-api.test1.root.dev.test.", 2); assertEquals("wrong target name", "test1.root.dev.test.", - ((CNAMERecord) recs[0]).getTarget().toString()); - assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord); + ((CNAMERecord) recs.get(0)).getTarget().toString()); + assertTrue("not an ARecord", + recs.get(isSecure() ? 2 : 1) instanceof ARecord); recs = assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", Type.SRV, 1); - assertTrue("not an SRV record", recs[0] instanceof SRVRecord); - assertEquals("wrong port", 1026, ((SRVRecord) recs[0]).getPort()); + assertTrue("not an SRV record", recs.get(0) instanceof SRVRecord); + assertEquals("wrong port", 1026, ((SRVRecord) recs.get(0)).getPort()); recs = assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", 2); assertEquals("wrong target name", "test1.root.dev.test.", - ((CNAMERecord) recs[0]).getTarget().toString()); - assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord); + ((CNAMERecord) recs.get(0)).getTarget().toString()); + assertTrue("not an ARecord", + recs.get(isSecure() ? 
2 : 1) instanceof ARecord); recs = assertDNSQuery("http-api.test1.root.dev.test.", 2); assertEquals("wrong target name", "test1.root.dev.test.", - ((CNAMERecord) recs[0]).getTarget().toString()); - assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord); + ((CNAMERecord) recs.get(0)).getTarget().toString()); + assertTrue("not an ARecord", + recs.get(isSecure() ? 2 : 1) instanceof ARecord); recs = assertDNSQuery("http-api.test1.root.dev.test.", Type.SRV, 1); - assertTrue("not an SRV record", recs[0] instanceof SRVRecord); - assertEquals("wrong port", 1027, ((SRVRecord) recs[0]).getPort()); + assertTrue("not an SRV record", recs.get(0) instanceof SRVRecord); + assertEquals("wrong port", 1027, ((SRVRecord) recs.get(0)).getPort()); assertDNSQuery("test1.root.dev.test.", Type.TXT, 3); assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", Type.TXT, 1); @@ -239,13 +243,13 @@ public void testContainerRegistration() throws Exception { record); // start assessing whether correct records are available - Record[] recs = + List recs = assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.dev.test."); assertEquals("wrong result", "172.17.0.19", - ((ARecord) recs[0]).getAddress().getHostAddress()); + ((ARecord) recs.get(0)).getAddress().getHostAddress()); recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1); - assertTrue("not an ARecord", recs[0] instanceof ARecord); + assertTrue("not an ARecord", recs.get(0) instanceof ARecord); } @Test @@ -277,16 +281,16 @@ public void testRecordTTL() throws Exception { record); // start assessing whether correct records are available - Record[] recs = assertDNSQuery( + List recs = assertDNSQuery( "ctr-e50-1451931954322-0016-01-000002.dev.test."); assertEquals("wrong result", "172.17.0.19", - ((ARecord) recs[0]).getAddress().getHostAddress()); - assertEquals("wrong ttl", 30L, recs[0].getTTL()); + ((ARecord) recs.get(0)).getAddress().getHostAddress()); + assertEquals("wrong ttl", 30L, recs.get(0).getTTL()); recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1); - assertTrue("not an ARecord", recs[0] instanceof ARecord); + assertTrue("not an ARecord", recs.get(0) instanceof ARecord); - assertEquals("wrong ttl", 30L, recs[0].getTTL()); + assertEquals("wrong ttl", 30L, recs.get(0).getTTL()); } @Test @@ -299,10 +303,11 @@ public void testReverseLookup() throws Exception { record); // start assessing whether correct records are available - Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1); + List recs = assertDNSQuery( + "19.0.17.172.in-addr.arpa.", Type.PTR, 1); assertEquals("wrong result", "httpd-1.test1.root.dev.test.", - ((PTRRecord) recs[0]).getTarget().toString()); + ((PTRRecord) recs.get(0)).getTarget().toString()); } @Test @@ -325,10 +330,11 @@ public void testReverseLookupInLargeNetwork() throws Exception { record); // start assessing whether correct records are available - Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1); + List recs = assertDNSQuery( + "19.0.17.172.in-addr.arpa.", Type.PTR, 1); assertEquals("wrong result", "httpd-1.test1.root.dev.test.", - ((PTRRecord) recs[0]).getTarget().toString()); + ((PTRRecord) recs.get(0)).getTarget().toString()); } @Test @@ -372,16 +378,16 @@ public void testNoContainerIP() throws Exception { assertEquals("wrong status", Rcode.NXDOMAIN, response.getRcode()); } - private Record[] assertDNSQuery(String lookup) throws IOException { + private List assertDNSQuery(String lookup) throws IOException { return assertDNSQuery(lookup, Type.A, 1); } - private 
Record[] assertDNSQuery(String lookup, int numRecs) + private List assertDNSQuery(String lookup, int numRecs) throws IOException { return assertDNSQuery(lookup, Type.A, numRecs); } - Record[] assertDNSQuery(String lookup, int type, int numRecs) + private List assertDNSQuery(String lookup, int type, int numRecs) throws IOException { Name name = Name.fromString(lookup); Record question = Record.newRecord(name, type, DClass.IN); @@ -394,9 +400,9 @@ Record[] assertDNSQuery(String lookup, int type, int numRecs) assertNotNull("Null response", response); assertEquals("Questions do not match", query.getQuestion(), response.getQuestion()); - Record[] recs = response.getSectionArray(Section.ANSWER); + List recs = response.getSection(Section.ANSWER); assertEquals("wrong number of answer records", - isSecure() ? numRecs * 2 : numRecs, recs.length); + isSecure() ? numRecs * 2 : numRecs, recs.size()); if (isSecure()) { boolean signed = false; for (Record record : recs) { @@ -410,8 +416,8 @@ Record[] assertDNSQuery(String lookup, int type, int numRecs) return recs; } - Record[] assertDNSQueryNotNull(String lookup, int type, int answerCount) - throws IOException { + private List assertDNSQueryNotNull( + String lookup, int type, int answerCount) throws IOException { Name name = Name.fromString(lookup); Record question = Record.newRecord(name, type, DClass.IN); Message query = Message.newQuery(question); @@ -423,9 +429,9 @@ Record[] assertDNSQueryNotNull(String lookup, int type, int answerCount) assertNotNull("Null response", response); assertEquals("Questions do not match", query.getQuestion(), response.getQuestion()); - Record[] recs = response.getSectionArray(Section.ANSWER); - assertEquals(answerCount, recs.length); - assertEquals(recs[0].getType(), type); + List recs = response.getSection(Section.ANSWER); + assertEquals(answerCount, recs.size()); + assertEquals(type, recs.get(0).getType()); return recs; } @@ -461,10 +467,8 @@ public void testDNSKEYRecord() throws Exception { ARecord aRecord = new ARecord(Name.fromString("some.test."), DClass.IN, 0, InetAddress.getByName("192.168.0.1")); - Calendar cal = Calendar.getInstance(); - Date inception = cal.getTime(); - cal.add(Calendar.YEAR, 1); - Date expiration = cal.getTime(); + Instant inception = Instant.now(); + Instant expiration = inception.plus(365, ChronoUnit.DAYS); RRset rrset = new RRset(aRecord); RRSIGRecord rrsigRecord = DNSSEC.sign(rrset, dnskeyRecord, @@ -495,13 +499,13 @@ public void testAAAALookup() throws Exception { record); // start assessing whether correct records are available - Record[] recs = assertDNSQuery( + List recs = assertDNSQuery( "ctr-e50-1451931954322-0016-01-000002.dev.test.", Type.AAAA, 1); assertEquals("wrong result", "172.17.0.19", - ((AAAARecord) recs[0]).getAddress().getHostAddress()); + ((AAAARecord) recs.get(0)).getAddress().getHostAddress()); recs = assertDNSQuery("httpd-1.test1.root.dev.test.", Type.AAAA, 1); - assertTrue("not an ARecord", recs[0] instanceof AAAARecord); + assertTrue("not an ARecord", recs.get(0) instanceof AAAARecord); } @Test @@ -524,9 +528,9 @@ public void testNegativeLookup() throws Exception { assertNotNull("Null response", response); assertEquals("Questions do not match", query.getQuestion(), response.getQuestion()); - Record[] sectionArray = response.getSectionArray(Section.AUTHORITY); + List sectionArray = response.getSection(Section.AUTHORITY); assertEquals("Wrong number of recs in AUTHORITY", isSecure() ? 
2 : 1, - sectionArray.length); + sectionArray.size()); boolean soaFound = false; for (Record rec : sectionArray) { soaFound = rec.getType() == Type.SOA; @@ -570,19 +574,19 @@ public void testReadMasterFile() throws Exception { record); // start assessing whether correct records are available - Record[] recs = + List recs = assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.dev.test."); assertEquals("wrong result", "172.17.0.19", - ((ARecord) recs[0]).getAddress().getHostAddress()); + ((ARecord) recs.get(0)).getAddress().getHostAddress()); recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1); - assertTrue("not an ARecord", recs[0] instanceof ARecord); + assertTrue("not an ARecord", recs.get(0) instanceof ARecord); // lookup dyanmic reverse records recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1); assertEquals("wrong result", "httpd-1.test1.root.dev.test.", - ((PTRRecord) recs[0]).getTarget().toString()); + ((PTRRecord) recs.get(0)).getTarget().toString()); // now lookup static reverse records Name name = Name.fromString("5.0.17.172.in-addr.arpa."); @@ -592,9 +596,9 @@ public void testReadMasterFile() throws Exception { query.addRecord(optRecord, Section.ADDITIONAL); byte[] responseBytes = getRegistryDNS().generateReply(query, null); Message response = new Message(responseBytes); - recs = response.getSectionArray(Section.ANSWER); + recs = response.getSection(Section.ANSWER); assertEquals("wrong result", "cn005.dev.test.", - ((PTRRecord) recs[0]).getTarget().toString()); + ((PTRRecord) recs.get(0)).getTarget().toString()); } @Test @@ -655,8 +659,7 @@ public void testExternalCNAMERecord() throws Exception { getRegistryDNS().initializeZones(conf); // start assessing whether correct records are available - Record[] recs = - assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME, 1); + assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME, 1); } @Test @@ -672,8 +675,7 @@ public void testRootLookup() throws Exception { getRegistryDNS().initializeZones(conf); // start assessing whether correct records are available - Record[] recs = - assertDNSQueryNotNull(".", Type.NS, 13); + assertDNSQueryNotNull(".", Type.NS, 13); } @Test @@ -692,10 +694,10 @@ public void testMultiARecord() throws Exception { record2); // start assessing whether correct records are available - Record[] recs = + List recs = assertDNSQuery("httpd.test1.root.dev.test.", 2); - assertTrue("not an ARecord", recs[0] instanceof ARecord); - assertTrue("not an ARecord", recs[1] instanceof ARecord); + assertTrue("not an ARecord", recs.get(0) instanceof ARecord); + assertTrue("not an ARecord", recs.get(1) instanceof ARecord); } @Test(timeout=5000) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e0e95d5706306..e7204a71de368 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -100,7 +100,7 @@ 3.5.6 4.2.0 3.0.5 - 2.1.7 + 3.4.0 27.0-jre 4.2.3 From 3cbee6e6881ae14c30054053f2eeac9ce617d3d6 Mon Sep 17 00:00:00 2001 From: Anoop Sam John Date: Sun, 25 Jul 2021 13:10:27 +0530 Subject: [PATCH 508/666] Addendum HADOOP-17770 WASB : Support disabling buffered reads in positional reads - Added the invalid SpotBugs warning to findbugs-exclude.xml (#3223) --- .../hadoop-azure/dev-support/findbugs-exclude.xml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml index b750b8b91c79e..fa6085faa55bb 100644 --- a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml +++ 
b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml @@ -83,4 +83,17 @@ + + + + + + From 6e4d4f4eb7c812d9ad6b7865384fca766470cec2 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 26 Jul 2021 01:53:39 +0900 Subject: [PATCH 509/666] HDFS-12920. HDFS default value change (with adding time unit) breaks old version MR tarball work with new version (3.0) of hadoop. (#3227) Revert "HDFS-10845. Change defaults in hdfs-site.xml to match timeunit type. Contributed by Yiqun Lin" This reverts commit b6d839a60ceed733bfacb791fc5ed06116720dd0. Conflicts: hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml --- .../src/main/resources/hdfs-default.xml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index a3c0fc85b44be..58e709f77b4fe 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -791,7 +791,7 @@ dfs.blockreport.initialDelay - 0s + 0 Delay for first block report in seconds. Support multiple time unit suffix(case insensitive), as described in dfs.heartbeat.interval.If @@ -836,7 +836,7 @@ dfs.datanode.directoryscan.interval - 21600s + 21600 Interval in seconds for Datanode to scan data directories and reconcile the difference between blocks in memory and on the disk. Support multiple time unit suffix(case insensitive), as described @@ -887,7 +887,7 @@ dfs.heartbeat.interval - 3s + 3 Determines datanode heartbeat interval in seconds. Can use the following suffix (case insensitive): @@ -1100,7 +1100,7 @@ dfs.namenode.decommission.interval - 30s + 30 Namenode periodicity in seconds to check if decommission or maintenance is complete. Support multiple time unit suffix(case insensitive), as described in dfs.heartbeat.interval. @@ -1168,7 +1168,7 @@ dfs.namenode.redundancy.interval.seconds - 3s + 3 The periodicity in seconds with which the namenode computes low redundancy work for datanodes. Support multiple time unit suffix(case insensitive), as described in dfs.heartbeat.interval. @@ -1292,7 +1292,7 @@ dfs.namenode.checkpoint.period - 3600s + 3600 The number of seconds between two periodic checkpoints. Support multiple time unit suffix(case insensitive), as described @@ -1312,7 +1312,7 @@ dfs.namenode.checkpoint.check.period - 60s + 60 The SecondaryNameNode and CheckpointNode will poll the NameNode every 'dfs.namenode.checkpoint.check.period' seconds to query the number of uncheckpointed transactions. Support multiple time unit suffix(case insensitive), @@ -1769,7 +1769,7 @@ dfs.client.datanode-restart.timeout - 30s + 30 Expert only. The time to wait, in seconds, from reception of an datanode shutdown notification for quick restart, until declaring @@ -1839,7 +1839,7 @@ dfs.ha.log-roll.period - 120s + 120 How often, in seconds, the StandbyNode should ask the active to roll edit logs. Since the StandbyNode only reads from finalized @@ -1854,7 +1854,7 @@ dfs.ha.tail-edits.period - 60s + 60 How often, the StandbyNode and ObserverNode should check if there are new edit log entries ready to be consumed. This is the minimum period between @@ -3805,7 +3805,7 @@ dfs.datanode.bp-ready.timeout - 20s + 20 The maximum wait time for datanode to be ready before failing the received request. 
Setting this to 0 fails requests right away if the From e76e46c46deaf5ce3457c49405156188d24ea5bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jul 2021 02:16:21 +0900 Subject: [PATCH 510/666] [UI2] Bump http-proxy to 1.18.1 (#2891) Bumps [http-proxy](https://github.com/http-party/node-http-proxy) from 1.18.0 to 1.18.1. - [Release notes](https://github.com/http-party/node-http-proxy/releases) - [Changelog](https://github.com/http-party/node-http-proxy/blob/master/CHANGELOG.md) - [Commits](https://github.com/http-party/node-http-proxy/compare/1.18.0...1.18.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Reviewed-by: Akira Ajisaka --- .../hadoop-yarn-ui/src/main/webapp/yarn.lock | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index 4b055e6f1fd9b..9da019198671c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -1751,7 +1751,7 @@ debug@2.6.9, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.6. dependencies: ms "2.0.0" -debug@^3.0.0, debug@^3.1.0: +debug@^3.1.0: version "3.2.6" resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b" integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ== @@ -2574,9 +2574,9 @@ event-emitter@~0.3.4: es5-ext "~0.10.14" eventemitter3@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.0.tgz#d65176163887ee59f386d64c82610b696a4a74eb" - integrity sha512-qerSRB0p+UDEssxTtm6EDKcE7W4OaoisfIMl4CngyEhjpYglocpNg6UEqCvemdGhosAsg4sO2dXJOdyBifPGCg== + version "4.0.7" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" + integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== events-to-array@^1.0.1: version "1.1.2" @@ -2829,11 +2829,9 @@ fireworm@^0.6.6: minimatch "~0.2.9" follow-redirects@^1.0.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.9.0.tgz#8d5bcdc65b7108fe1508649c79c12d732dcedb4f" - integrity sha512-CRcPzsSIbXyVDl0QI01muNDu69S8trU4jArW9LpOt2WtC6LyUJetcIrmfHsRBx7/Jb6GHJUiuqyYxPooFfNt6A== - dependencies: - debug "^3.0.0" + version "1.13.3" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.13.3.tgz#e5598ad50174c1bc4e872301e82ac2cd97f90267" + integrity sha512-DUgl6+HDzB0iEptNQEXLx/KhTmDb8tZUHSeLqpnjpknR70H0nC2t9N73BK6fN4hOvJ84pKlIQVQ4k5FFlBedKA== for-in@^1.0.1: version "1.0.2" @@ -3446,9 +3444,9 @@ http-errors@~1.7.2: integrity sha1-ksnBN0w1CF912zWexWzCV8u5P6Q= http-proxy@^1.8.1, http-proxy@^1.9.0: - version "1.18.0" - resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.0.tgz#dbe55f63e75a347db7f3d99974f2692a314a6a3a" - integrity sha512-84I2iJM/n1d4Hdgc6y2+qY5mDaz2PUVjlg9znE9byl+q0uC3DeByqBGReQu5tpLK0TAqTIXScRUV+dg7+bUPpQ== + version "1.18.1" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.1.tgz#401541f0534884bbf95260334e72f88ee3976549" + integrity 
sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ== dependencies: eventemitter3 "^4.0.0" follow-redirects "^1.0.0" From 386072f87abad3a7917900fccb50aed8e46d41a9 Mon Sep 17 00:00:00 2001 From: litao Date: Mon, 26 Jul 2021 12:45:01 +0800 Subject: [PATCH 511/666] HDFS-16131. Show storage type for failed volumes on namenode web (#3211). Contributed by tomscut. Signed-off-by: He Xiaoqiao --- .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 2 +- .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 4 ++-- .../hdfs/server/datanode/TestDataNodeVolumeFailure.java | 4 ++++ .../server/datanode/TestDataNodeVolumeFailureReporting.java | 5 ++++- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 209db423b8c0c..73dc5846a9141 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -723,7 +723,7 @@ ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException { for (Iterator newLocationItr = results.newLocations.iterator(); newLocationItr.hasNext();) { StorageLocation newLocation = newLocationItr.next(); - if (newLocation.getNormalizedUri().toString().equals( + if (newLocation.toString().equals( failedStorageLocation)) { // The failed storage is being re-added. DataNode#refreshVolumes() // will take care of re-assessing it. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index b399588285cdb..597ffbe806694 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -728,7 +728,7 @@ public String[] getFailedStorageLocations() { infos.length); for (VolumeFailureInfo info: infos) { failedStorageLocations.add( - info.getFailedStorageLocation().getNormalizedUri().toString()); + info.getFailedStorageLocation().toString()); } return failedStorageLocations.toArray( new String[failedStorageLocations.size()]); @@ -767,7 +767,7 @@ public VolumeFailureSummary getVolumeFailureSummary() { long estimatedCapacityLostTotal = 0; for (VolumeFailureInfo info: infos) { failedStorageLocations.add( - info.getFailedStorageLocation().getNormalizedUri().toString()); + info.getFailedStorageLocation().toString()); long failureDate = info.getFailureDate(); if (failureDate > lastVolumeFailureDate) { lastVolumeFailureDate = failureDate; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index e15baacac843f..6b5faee984914 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -215,6 +215,10 @@ public Boolean 
get() { BlockManagerTestUtil.checkHeartbeat(bm); // NN now should have latest volume failure assertEquals(1, cluster.getNamesystem().getVolumeFailuresTotal()); + // assert failedStorageLocations + assertTrue(dn.getFSDataset().getVolumeFailureSummary() + .getFailedStorageLocations()[0] + .contains("[DISK]")); // verify number of blocks and files... verify(filename, filesize); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java index 326c54c8f36bd..6011d6eeef1a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java @@ -721,7 +721,10 @@ private String[] convertToAbsolutePaths(String[] locations) { String[] absolutePaths = new String[locations.length]; for (int count = 0; count < locations.length; count++) { try { - absolutePaths[count] = new File(new URI(locations[count])) + String location = locations[count]; + location = location.contains("]") + ? location.substring(location.indexOf("]") + 1) : location; + absolutePaths[count] = new File(new URI(location)) .getAbsolutePath(); } catch (URISyntaxException e) { //if the provided location is not an URI, From b3c30e92897a2bc1a0420e766438df2040785535 Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 26 Jul 2021 16:46:07 +0900 Subject: [PATCH 512/666] HDFS-16140. TestBootstrapAliasmap fails by BindException. (#3229) Reviewed-by: Hui Fei --- .../hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java index 715622645eaa8..41db9433de557 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java @@ -51,6 +51,9 @@ public class TestBootstrapAliasmap { public void setup() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster.setupNamenodeProvidedConfiguration(conf); + // use free port instead of default 50200 port + conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, + "127.0.0.1:" + NetUtils.getFreeSocketPort()); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(1) .build(); From fa83a9d49a27fe34549d2f6af3e1aa02aee6a8d1 Mon Sep 17 00:00:00 2001 From: Gautham B A Date: Tue, 27 Jul 2021 01:39:36 +0530 Subject: [PATCH 513/666] HADOOP-17807. Use separate src dir for platform builds (#3210) --- dev-support/Jenkinsfile | 195 ++++++++++++++++++++++++++++------------ dev-support/jenkins.sh | 68 +++++++++++--- 2 files changed, 194 insertions(+), 69 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 6cabbfe7b54c4..9e1c7699e52ad 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -29,9 +29,6 @@ pipeline { } environment { - SOURCEDIR = 'src' - // will also need to change notification section below - PATCHDIR = 'out' YETUS='yetus' // Branch or tag name. 
Yetus release tags are 'rel/X.Y.Z' YETUS_VERSION='f9ba0170a5787a5f4662d3769804fef0226a182f' @@ -56,6 +53,35 @@ pipeline { } } + // Setup codebase so that each platform's build happens in its own exclusive copy of the + // codebase. + // Primarily because YETUS messes up the git branch information and affects the subsequent + // optional stages after the first one. + stage ('setup sources') { + steps { + dir("${WORKSPACE}/centos-8") { + sh '''#!/usr/bin/env bash + + cp -Rp ${WORKSPACE}/src ${WORKSPACE}/centos-8 + ''' + } + + dir("${WORKSPACE}/debian-10") { + sh '''#!/usr/bin/env bash + + cp -Rp ${WORKSPACE}/src ${WORKSPACE}/debian-10 + ''' + } + + dir("${WORKSPACE}/ubuntu-focal") { + sh '''#!/usr/bin/env bash + + cp -Rp ${WORKSPACE}/src ${WORKSPACE}/ubuntu-focal + ''' + } + } + } + // This is an optional stage which runs only when there's a change in // C++/C++ build/platform. // This stage serves as a means of cross platform validation, which is @@ -63,6 +89,8 @@ pipeline { // break the Hadoop build on Centos 8. stage ('precommit-run Centos 8') { environment { + SOURCEDIR = "${WORKSPACE}/centos-8/src" + PATCHDIR = "${WORKSPACE}/centos-8/out" DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile_centos_8" IS_OPTIONAL = 1 } @@ -78,10 +106,32 @@ pipeline { sh '''#!/usr/bin/env bash chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" - "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" run_ci ''' } } + + post { + // Since this is an optional platform, we want to copy the artifacts + // and archive it only if the build fails, to help with debugging. + failure { + sh '''#!/usr/bin/env bash + + cp -Rp "${WORKSPACE}/centos-8/out" "${WORKSPACE}" + ''' + archiveArtifacts "out/**" + } + + cleanup() { + script { + sh '''#!/usr/bin/env bash + + chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" cleanup_ci_proc + ''' + } + } + } } // This is an optional stage which runs only when there's a change in @@ -91,6 +141,8 @@ pipeline { // break the Hadoop build on Debian 10. stage ('precommit-run Debian 10') { environment { + SOURCEDIR = "${WORKSPACE}/debian-10/src" + PATCHDIR = "${WORKSPACE}/debian-10/out" DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile_debian_10" IS_OPTIONAL = 1 } @@ -106,16 +158,40 @@ pipeline { sh '''#!/usr/bin/env bash chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" - "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" run_ci ''' } } + + post { + // Since this is an optional platform, we want to copy the artifacts + // and archive it only if the build fails, to help with debugging. + failure { + sh '''#!/usr/bin/env bash + + cp -Rp "${WORKSPACE}/debian-10/out" "${WORKSPACE}" + ''' + archiveArtifacts "out/**" + } + + cleanup() { + script { + sh '''#!/usr/bin/env bash + + chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" cleanup_ci_proc + ''' + } + } + } } // We want to use Ubuntu Focal as our main CI and thus, this stage // isn't optional (runs for all the PRs). 
stage ('precommit-run Ubuntu focal') { environment { + SOURCEDIR = "${WORKSPACE}/ubuntu-focal/src" + PATCHDIR = "${WORKSPACE}/ubuntu-focal/out" DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" IS_OPTIONAL = 0 } @@ -131,69 +207,76 @@ pipeline { sh '''#!/usr/bin/env bash chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" - "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" run_ci ''' } } - } - } + post { + always { + script { + // Publish status if it was missed (YETUS-1059) + withCredentials( + [usernamePassword(credentialsId: '683f5dcf-5552-4b28-9fb1-6a6b77cf53dd', + passwordVariable: 'GITHUB_TOKEN', + usernameVariable: 'GITHUB_USER')]) { + sh '''#!/usr/bin/env bash - post { - always { - script { - // Publish status if it was missed (YETUS-1059) - withCredentials( - [usernamePassword(credentialsId: '683f5dcf-5552-4b28-9fb1-6a6b77cf53dd', - passwordVariable: 'GITHUB_TOKEN', - usernameVariable: 'GITHUB_USER')]) { - sh '''#!/usr/bin/env bash - YETUS_ARGS+=("--github-token=${GITHUB_TOKEN}") - YETUS_ARGS+=("--patch-dir=${WORKSPACE}/${PATCHDIR}") - TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/github-status-recovery.sh" - /usr/bin/env bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ${EXTRA_ARGS} || true - ''' - } + # Copy the artifacts of Ubuntu focal build to workspace + cp -Rp "${WORKSPACE}/ubuntu-focal/out" "${WORKSPACE}" + + # Send Github status + chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" github_status_recovery + ''' + } - // Yetus output - archiveArtifacts "${env.PATCHDIR}/**" - // Publish the HTML report so that it can be looked at - // Has to be relative to WORKSPACE. - publishHTML (target: [ - allowMissing: true, - keepAll: true, - alwaysLinkToLastBuild: true, - // Has to be relative to WORKSPACE - reportDir: "${env.PATCHDIR}", - reportFiles: 'report.html', - reportName: 'Yetus Report' - ]) - // Publish JUnit results - try { - junit "${env.SOURCEDIR}/**/target/surefire-reports/*.xml" - } catch(e) { - echo 'junit processing: ' + e.toString() + // YETUS output + archiveArtifacts "out/**" + + // Publish the HTML report so that it can be looked at + // Has to be relative to WORKSPACE. 
+ publishHTML (target: [ + allowMissing: true, + keepAll: true, + alwaysLinkToLastBuild: true, + // Has to be relative to WORKSPACE + reportDir: "out", + reportFiles: 'report.html', + reportName: 'Yetus Report' + ]) + + // Publish JUnit results + try { + junit "${SOURCEDIR}/**/target/surefire-reports/*.xml" + } catch(e) { + echo 'junit processing: ' + e.toString() + } + } + } + + cleanup() { + script { + sh '''#!/usr/bin/env bash + + chmod u+x "${SOURCEDIR}/dev-support/jenkins.sh" + "${SOURCEDIR}/dev-support/jenkins.sh" cleanup_ci_proc + ''' + } + } } - } } + } + post { // Jenkins pipeline jobs fill slaves on PRs without this :( cleanup() { script { - sh ''' - # See YETUS-764 - if [ -f "${WORKSPACE}/${PATCHDIR}/pidfile.txt" ]; then - echo "test-patch process appears to still be running: killing" - kill `cat "${WORKSPACE}/${PATCHDIR}/pidfile.txt"` || true - sleep 10 - fi - if [ -f "${WORKSPACE}/${PATCHDIR}/cidfile.txt" ]; then - echo "test-patch container appears to still be running: killing" - docker kill `cat "${WORKSPACE}/${PATCHDIR}/cidfile.txt"` || true - fi - # See HADOOP-13951 - chmod -R u+rxw "${WORKSPACE}" - ''' + sh '''#!/usr/bin/env bash + + # See HADOOP-13951 + chmod -R u+rxw "${WORKSPACE}" + ''' deleteDir() } } diff --git a/dev-support/jenkins.sh b/dev-support/jenkins.sh index 09905dd320cb7..35431fbe5b033 100644 --- a/dev-support/jenkins.sh +++ b/dev-support/jenkins.sh @@ -115,10 +115,10 @@ function run_ci() { TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/test-patch.sh" # this must be clean for every run - if [[ -d "${WORKSPACE}/${PATCHDIR}" ]]; then - rm -rf "${WORKSPACE:?}/${PATCHDIR}" + if [[ -d "${PATCHDIR}" ]]; then + rm -rf "${PATCHDIR:?}" fi - mkdir -p "${WORKSPACE}/${PATCHDIR}" + mkdir -p "${PATCHDIR}" # if given a JIRA issue, process it. If CHANGE_URL is set # (e.g., Github Branch Source plugin), process it. 
@@ -128,23 +128,23 @@ function run_ci() { if [[ -n "${JIRA_ISSUE_KEY}" ]]; then YETUS_ARGS+=("${JIRA_ISSUE_KEY}") elif [[ -z "${CHANGE_URL}" ]]; then - echo "Full build skipped" >"${WORKSPACE}/${PATCHDIR}/report.html" + echo "Full build skipped" >"${PATCHDIR}/report.html" exit 0 fi - YETUS_ARGS+=("--patch-dir=${WORKSPACE}/${PATCHDIR}") + YETUS_ARGS+=("--patch-dir=${PATCHDIR}") # where the source is located - YETUS_ARGS+=("--basedir=${WORKSPACE}/${SOURCEDIR}") + YETUS_ARGS+=("--basedir=${SOURCEDIR}") # our project defaults come from a personality file YETUS_ARGS+=("--project=hadoop") - YETUS_ARGS+=("--personality=${WORKSPACE}/${SOURCEDIR}/dev-support/bin/hadoop.sh") + YETUS_ARGS+=("--personality=${SOURCEDIR}/dev-support/bin/hadoop.sh") # lots of different output formats - YETUS_ARGS+=("--brief-report-file=${WORKSPACE}/${PATCHDIR}/brief.txt") - YETUS_ARGS+=("--console-report-file=${WORKSPACE}/${PATCHDIR}/console.txt") - YETUS_ARGS+=("--html-report-file=${WORKSPACE}/${PATCHDIR}/report.html") + YETUS_ARGS+=("--brief-report-file=${PATCHDIR}/brief.txt") + YETUS_ARGS+=("--console-report-file=${PATCHDIR}/console.txt") + YETUS_ARGS+=("--html-report-file=${PATCHDIR}/report.html") # enable writing back to Github YETUS_ARGS+=("--github-token=${GITHUB_TOKEN}") @@ -206,7 +206,49 @@ function run_ci() { "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" } -# Check if the CI needs to be run, if so, do so :) -if check_ci_run; then - run_ci +## @description Cleans up the processes started by YETUS +function cleanup_ci_proc() { + # See YETUS-764 + if [ -f "${PATCHDIR}/pidfile.txt" ]; then + echo "test-patch process appears to still be running: killing" + kill "$(cat "${PATCHDIR}/pidfile.txt")" || true + sleep 10 + fi + if [ -f "${PATCHDIR}/cidfile.txt" ]; then + echo "test-patch container appears to still be running: killing" + docker kill "$(cat "${PATCHDIR}/cidfile.txt")" || true + fi +} + +## @description Invokes github_status_recovery in YETUS's precommit +function github_status_recovery() { + YETUS_ARGS+=("--github-token=${GITHUB_TOKEN}") + YETUS_ARGS+=("--patch-dir=${PATCHDIR}") + TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/github-status-recovery.sh" + /usr/bin/env bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" "${EXTRA_ARGS}" || true +} + +if [ -z "$1" ]; then + echo "Must specify an argument for jenkins.sh" + echo "run_ci - Runs the CI based on platform image as defined by DOCKERFILE" + echo "cleanup_ci_proc - Cleans up the processes spawned for running the CI" + echo "github_status_recovery - Sends Github status (refer to YETUS precommit for more details)" + exit 1 +fi + +# Process arguments to jenkins.sh +if [ "$1" == "run_ci" ]; then + # Check if the CI needs to be run, if so, do so :) + if check_ci_run; then + run_ci + else + echo "No C++ file/C++ build/platform changes found, will not run CI" + fi +elif [ "$1" == "cleanup_ci_proc" ]; then + cleanup_ci_proc +elif [ "$1" == "github_status_recovery" ]; then + github_status_recovery +else + echo "Don't know how to process $1" + exit 1 fi From fe1ba9040c3419a32f4a844c5d0c9a1615b43d3e Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Tue, 27 Jul 2021 12:27:12 +0530 Subject: [PATCH 514/666] HDFS-16139. Update BPServiceActor Scheduler's nextBlockReportTime atomically (#3228). Contributed by Viraj Jasani. 
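
The change below swaps the volatile long field for an AtomicLong so that
the compound update in scheduleNextBlockReport() (read, add, write) is a
single atomic step instead of three, closing the race between the actor
thread and the test hooks that also read and reset the schedule. A
minimal sketch of the pattern, with illustrative names only:

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch of the scheduling field after the change; the class and
    // method names are placeholders, not the patch's code.
    class NextReportTime {
      private final AtomicLong next =
          new AtomicLong(System.currentTimeMillis());

      // getAndAdd() performs the read-modify-write atomically, so a
      // concurrent reset cannot be lost between the read and the write.
      long advanceBy(long intervalMs) {
        return next.getAndAdd(intervalMs) + intervalMs;
      }

      long get() {
        return next.get();
      }
    }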
Signed-off-by: He Xiaoqiao --- .../hdfs/server/datanode/BPServiceActor.java | 42 ++++++++++++------- .../datanode/TestBpServiceActorScheduler.java | 23 ++++++---- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 59958fb9981b8..54624140ac08a 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -39,6 +39,7 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -323,10 +324,10 @@ private void connectToNNAndHandshake() throws IOException { void triggerBlockReportForTests() { synchronized (ibrManager) { scheduler.scheduleHeartbeat(); - long oldBlockReportTime = scheduler.nextBlockReportTime; + long oldBlockReportTime = scheduler.getNextBlockReportTime(); scheduler.forceFullBlockReportNow(); ibrManager.notifyAll(); - while (oldBlockReportTime == scheduler.nextBlockReportTime) { + while (oldBlockReportTime == scheduler.getNextBlockReportTime()) { try { ibrManager.wait(100); } catch (InterruptedException e) { @@ -1163,8 +1164,8 @@ static class Scheduler { // nextBlockReportTime and nextHeartbeatTime may be assigned/read // by testing threads (through BPServiceActor#triggerXXX), while also // assigned/read by the actor thread. - @VisibleForTesting - volatile long nextBlockReportTime = monotonicNow(); + private final AtomicLong nextBlockReportTime = + new AtomicLong(monotonicNow()); @VisibleForTesting volatile long nextHeartbeatTime = monotonicNow(); @@ -1257,7 +1258,7 @@ boolean isLifelineDue(long startTime) { } boolean isBlockReportDue(long curTime) { - return nextBlockReportTime - curTime <= 0; + return nextBlockReportTime.get() - curTime <= 0; } boolean isOutliersReportDue(long curTime) { @@ -1281,15 +1282,15 @@ void forceFullBlockReportNow() { long scheduleBlockReport(long delay, boolean isRegistration) { if (delay > 0) { // send BR after random delay // Numerical overflow is possible here and is okay. - nextBlockReportTime = - monotonicNow() + ThreadLocalRandom.current().nextInt((int) (delay)); + nextBlockReportTime.getAndSet( + monotonicNow() + ThreadLocalRandom.current().nextInt((int) (delay))); } else { // send at next heartbeat - nextBlockReportTime = monotonicNow(); + nextBlockReportTime.getAndSet(monotonicNow()); } resetBlockReportTime = isRegistration; // reset future BRs for // randomness, post first block report to avoid regular BRs from all // DN's coming at one time. - return nextBlockReportTime; + return nextBlockReportTime.get(); } /** @@ -1302,8 +1303,8 @@ void scheduleNextBlockReport() { // If we have sent the first set of block reports, then wait a random // time before we start the periodic block reports. 
if (resetBlockReportTime) { - nextBlockReportTime = monotonicNow() + - ThreadLocalRandom.current().nextInt((int)(blockReportIntervalMs)); + nextBlockReportTime.getAndSet(monotonicNow() + + ThreadLocalRandom.current().nextInt((int) (blockReportIntervalMs))); resetBlockReportTime = false; } else { /* say the last block report was at 8:20:14. The current report @@ -1313,17 +1314,16 @@ void scheduleNextBlockReport() { * 2) unexpected like 21:35:43, next report should be at 2:20:14 * on the next day. */ - long factor = - (monotonicNow() - nextBlockReportTime + blockReportIntervalMs) - / blockReportIntervalMs; + long factor = (monotonicNow() - nextBlockReportTime.get() + + blockReportIntervalMs) / blockReportIntervalMs; if (factor != 0) { - nextBlockReportTime += factor * blockReportIntervalMs; + nextBlockReportTime.getAndAdd(factor * blockReportIntervalMs); } else { // If the difference between the present time and the scheduled // time is very less, the factor can be 0, so in that case, we can // ignore that negligible time, spent while sending the BRss and // schedule the next BR after the blockReportInterval. - nextBlockReportTime += blockReportIntervalMs; + nextBlockReportTime.getAndAdd(blockReportIntervalMs); } } } @@ -1336,6 +1336,16 @@ long getLifelineWaitTime() { return nextLifelineTime - monotonicNow(); } + @VisibleForTesting + long getNextBlockReportTime() { + return nextBlockReportTime.get(); + } + + @VisibleForTesting + void setNextBlockReportTime(long nextBlockReportTime) { + this.nextBlockReportTime.getAndSet(nextBlockReportTime); + } + /** * Wrapped for testing. * @return diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java index 36b6c695c87b9..b07e9e461f19b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java @@ -31,6 +31,7 @@ import static java.lang.Math.abs; import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -70,7 +71,7 @@ public void testScheduleBlockReportImmediate() { Scheduler scheduler = makeMockScheduler(now); scheduler.scheduleBlockReport(0, true); assertTrue(scheduler.resetBlockReportTime); - assertThat(scheduler.nextBlockReportTime, is(now)); + assertThat(scheduler.getNextBlockReportTime(), is(now)); } } @@ -81,8 +82,8 @@ public void testScheduleBlockReportDelayed() { final long delayMs = 10; scheduler.scheduleBlockReport(delayMs, true); assertTrue(scheduler.resetBlockReportTime); - assertTrue(scheduler.nextBlockReportTime - now >= 0); - assertTrue(scheduler.nextBlockReportTime - (now + delayMs) < 0); + assertTrue(scheduler.getNextBlockReportTime() - now >= 0); + assertTrue(scheduler.getNextBlockReportTime() - (now + delayMs) < 0); } } @@ -96,7 +97,8 @@ public void testScheduleNextBlockReport() { Scheduler scheduler = makeMockScheduler(now); assertTrue(scheduler.resetBlockReportTime); scheduler.scheduleNextBlockReport(); - assertTrue(scheduler.nextBlockReportTime - (now + BLOCK_REPORT_INTERVAL_MS) < 0); + assertTrue(scheduler.getNextBlockReportTime() + - (now + BLOCK_REPORT_INTERVAL_MS) < 0); } } @@ -110,7 
+112,8 @@ public void testScheduleNextBlockReport2() { Scheduler scheduler = makeMockScheduler(now); scheduler.resetBlockReportTime = false; scheduler.scheduleNextBlockReport(); - assertThat(scheduler.nextBlockReportTime, is(now + BLOCK_REPORT_INTERVAL_MS)); + assertThat(scheduler.getNextBlockReportTime(), + is(now + BLOCK_REPORT_INTERVAL_MS)); } } @@ -129,10 +132,12 @@ public void testScheduleNextBlockReport3() { final long blockReportDelay = BLOCK_REPORT_INTERVAL_MS + random.nextInt(2 * (int) BLOCK_REPORT_INTERVAL_MS); final long origBlockReportTime = now - blockReportDelay; - scheduler.nextBlockReportTime = origBlockReportTime; + scheduler.setNextBlockReportTime(origBlockReportTime); scheduler.scheduleNextBlockReport(); - assertTrue(scheduler.nextBlockReportTime - now < BLOCK_REPORT_INTERVAL_MS); - assertTrue(((scheduler.nextBlockReportTime - origBlockReportTime) % BLOCK_REPORT_INTERVAL_MS) == 0); + assertTrue((scheduler.getNextBlockReportTime() - now) + < BLOCK_REPORT_INTERVAL_MS); + assertEquals(0, ((scheduler.getNextBlockReportTime() - origBlockReportTime) + % BLOCK_REPORT_INTERVAL_MS)); } } @@ -201,7 +206,7 @@ private Scheduler makeMockScheduler(long now) { HEARTBEAT_INTERVAL_MS, LIFELINE_INTERVAL_MS, BLOCK_REPORT_INTERVAL_MS, OUTLIER_REPORT_INTERVAL_MS)); doReturn(now).when(mockScheduler).monotonicNow(); - mockScheduler.nextBlockReportTime = now; + mockScheduler.setNextBlockReportTime(now); mockScheduler.nextHeartbeatTime = now; mockScheduler.nextOutliersReportTime = now; return mockScheduler; From 6f1504bd48f98090d017d56c416766f842420fc6 Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Tue, 27 Jul 2021 15:38:51 +0530 Subject: [PATCH 515/666] HADOOP-13887. Support S3 client side encryption (S3-CSE) using AWS-SDK (#2706) This (big!) patch adds support for client side encryption in AWS S3, with keys managed by AWS-KMS. Read the documentation in encryption.md very, very carefully before use and consider it unstable. S3-CSE is enabled in the existing configuration option "fs.s3a.server-side-encryption-algorithm": fs.s3a.server-side-encryption-algorithm=CSE-KMS fs.s3a.server-side-encryption.key= You cannot enable CSE and SSE in the same client, although you can still enable a default SSE option in the S3 console. * Filesystem list/get status operations subtract 16 bytes from the length of all files >= 16 bytes long to compensate for the padding which CSE adds. * The SDK always warns about the specific algorithm chosen being deprecated. It is critical to use this algorithm for ranged GET requests to work (i.e. random IO). Ignore. * Unencrypted files CANNOT BE READ. The entire bucket SHOULD be encrypted with S3-CSE. * Uploading files may be a bit slower as blocks are now written sequentially. * The Multipart Upload API is disabled when S3-CSE is active. 
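
A minimal sketch of turning this on from client code, using the two
properties named above; the bucket name and the KMS key ARN below are
placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CseKmsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // same option names as above: CSE-KMS plus the key to encrypt with
        conf.set("fs.s3a.server-side-encryption-algorithm", "CSE-KMS");
        conf.set("fs.s3a.server-side-encryption.key",
            "arn:aws:kms:us-west-2:111122223333:key/example-key-id");
        try (FileSystem fs =
            FileSystem.get(new URI("s3a://example-bucket/"), conf)) {
          // objects written through this FileSystem are encrypted
          // client-side before they reach S3
          fs.getFileStatus(new Path("s3a://example-bucket/"));
        }
      }
    }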
Contributed by Mehakmeet Singh --- ...AbstractContractMultipartUploaderTest.java | 3 + .../org/apache/hadoop/fs/s3a/Constants.java | 7 +- .../hadoop/fs/s3a/DefaultS3ClientFactory.java | 135 ++++++-- .../org/apache/hadoop/fs/s3a/Listing.java | 9 +- .../hadoop/fs/s3a/S3ABlockOutputStream.java | 43 ++- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 84 ++++- .../org/apache/hadoop/fs/s3a/S3AUtils.java | 46 +-- .../org/apache/hadoop/fs/s3a/Statistic.java | 9 +- .../hadoop/fs/s3a/impl/HeaderProcessing.java | 11 +- .../hadoop/fs/s3a/impl/InternalConstants.java | 8 + .../hadoop/fs/s3a/impl/RenameOperation.java | 2 +- .../hadoop/fs/s3a/impl/StoreContext.java | 15 +- .../fs/s3a/impl/StoreContextBuilder.java | 16 +- .../markdown/tools/hadoop-aws/encryption.md | 233 +++++++++++++- .../tools/hadoop-aws/troubleshooting_s3a.md | 278 ++++++++++++++++ .../hadoop/fs/s3a/AbstractS3ATestBase.java | 9 + .../hadoop/fs/s3a/EncryptionTestUtils.java | 2 +- .../fs/s3a/ITestS3AClientSideEncryption.java | 301 ++++++++++++++++++ .../s3a/ITestS3AClientSideEncryptionKms.java | 95 ++++++ ...TestS3AEncryptionSSEKMSUserDefinedKey.java | 9 +- ...estS3AEncryptionWithDefaultS3Settings.java | 6 +- .../hadoop/fs/s3a/ITestS3AInconsistency.java | 9 + .../hadoop/fs/s3a/ITestS3AMiscOperations.java | 3 +- .../fs/s3a/ITestS3GuardListConsistency.java | 6 +- .../hadoop/fs/s3a/S3ATestConstants.java | 13 + .../apache/hadoop/fs/s3a/S3ATestUtils.java | 12 + .../fs/s3a/TestS3AInputStreamRetry.java | 2 +- .../hadoop/fs/s3a/TestSSEConfiguration.java | 6 +- .../hadoop/fs/s3a/auth/ITestCustomSigner.java | 34 +- .../ITestSessionDelegationInFileystem.java | 23 +- .../fs/s3a/commit/AbstractCommitITest.java | 9 +- .../ITestS3AFileContextStatistics.java | 33 +- .../scale/ITestS3AHugeFilesEncryption.java | 8 +- .../ITestS3AHugeFilesSSECDiskBlocks.java | 2 +- .../scale/ITestS3AInputStreamPerformance.java | 5 + 35 files changed, 1371 insertions(+), 115 deletions(-) create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java create mode 100644 hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 90e12a84487a0..3e754e4578de8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -89,6 +89,9 @@ public void setup() throws Exception { final FileSystem fs = getFileSystem(); Path testPath = getContract().getTestPath(); + Assume.assumeTrue("Multipart uploader is not supported", + fs.hasPathCapability(testPath, + CommonPathCapabilities.FS_MULTIPART_UPLOADER)); uploader0 = fs.createMultipartUploader(testPath).build(); uploader1 = fs.createMultipartUploader(testPath).build(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index b741bc2301744..ea3de1da4b443 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -420,7 +420,12 @@ private Constants() { 
"fs.s3a.multipart.purge.age"; public static final long DEFAULT_PURGE_EXISTING_MULTIPART_AGE = 86400; - // s3 server-side encryption, see S3AEncryptionMethods for valid options + /** + * s3 server-side encryption or s3 client side encryption method, see + * {@link S3AEncryptionMethods} for valid options. + * + * {@value} + */ public static final String SERVER_SIDE_ENCRYPTION_ALGORITHM = "fs.s3a.server-side-encryption-algorithm"; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java index 7dc920ce50c1d..2abef630a806e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java @@ -25,13 +25,23 @@ import com.amazonaws.SdkClientException; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.regions.RegionUtils; import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Builder; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.AmazonS3EncryptionClientV2Builder; +import com.amazonaws.services.s3.AmazonS3EncryptionV2; import com.amazonaws.services.s3.S3ClientOptions; import com.amazonaws.services.s3.internal.ServiceUtils; +import com.amazonaws.services.s3.model.CryptoConfigurationV2; +import com.amazonaws.services.s3.model.CryptoMode; +import com.amazonaws.services.s3.model.CryptoRangeGetMode; +import com.amazonaws.services.s3.model.EncryptionMaterialsProvider; +import com.amazonaws.services.s3.model.KMSEncryptionMaterialsProvider; import com.amazonaws.util.AwsHostNameUtils; import com.amazonaws.util.RuntimeHttpUtils; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,6 +58,8 @@ import static org.apache.hadoop.fs.s3a.Constants.AWS_S3_CENTRAL_REGION; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; import static org.apache.hadoop.fs.s3a.S3AUtils.translateException; /** @@ -112,15 +124,77 @@ public AmazonS3 createS3Client( } try { - return buildAmazonS3Client( - awsConf, - parameters); + if (S3AEncryptionMethods.getMethod(S3AUtils. + lookupPassword(conf, SERVER_SIDE_ENCRYPTION_ALGORITHM, null)) + .equals(S3AEncryptionMethods.CSE_KMS)) { + return buildAmazonS3EncryptionClient( + awsConf, + parameters); + } else { + return buildAmazonS3Client( + awsConf, + parameters); + } } catch (SdkClientException e) { // SDK refused to build. throw translateException("creating AWS S3 client", uri.toString(), e); } } + /** + * Create an {@link AmazonS3} client of type + * {@link AmazonS3EncryptionV2} if CSE is enabled. + * + * @param awsConf AWS configuration. + * @param parameters parameters. + * + * @return new AmazonS3 client. + * @throws IOException if lookupPassword() has any problem. 
+ */ + protected AmazonS3 buildAmazonS3EncryptionClient( + final ClientConfiguration awsConf, + final S3ClientCreationParameters parameters) throws IOException { + + AmazonS3 client; + AmazonS3EncryptionClientV2Builder builder = + new AmazonS3EncryptionClientV2Builder(); + Configuration conf = getConf(); + + //CSE-KMS Method + String kmsKeyId = S3AUtils.lookupPassword(conf, + SERVER_SIDE_ENCRYPTION_KEY, null); + // Check if kmsKeyID is not null + Preconditions.checkArgument(kmsKeyId != null, "CSE-KMS method " + + "requires KMS key ID. Use " + SERVER_SIDE_ENCRYPTION_KEY + + " property to set it. "); + + EncryptionMaterialsProvider materialsProvider = + new KMSEncryptionMaterialsProvider(kmsKeyId); + builder.withEncryptionMaterialsProvider(materialsProvider); + //Configure basic params of a S3 builder. + configureBasicParams(builder, awsConf, parameters); + + // Configuring endpoint. + AmazonS3EncryptionClientV2Builder.EndpointConfiguration epr + = createEndpointConfiguration(parameters.getEndpoint(), + awsConf, getConf().getTrimmed(AWS_REGION)); + configureEndpoint(builder, epr); + + // Create cryptoConfig. + CryptoConfigurationV2 cryptoConfigurationV2 = + new CryptoConfigurationV2(CryptoMode.AuthenticatedEncryption) + .withRangeGetMode(CryptoRangeGetMode.ALL); + if (epr != null) { + cryptoConfigurationV2 + .withAwsKmsRegion(RegionUtils.getRegion(epr.getSigningRegion())); + LOG.debug("KMS region used: {}", cryptoConfigurationV2.getAwsKmsRegion()); + } + builder.withCryptoConfiguration(cryptoConfigurationV2); + client = builder.build(); + + return client; + } + /** * Use the Builder API to create an AWS S3 client. *

    @@ -137,33 +211,60 @@ protected AmazonS3 buildAmazonS3Client( final ClientConfiguration awsConf, final S3ClientCreationParameters parameters) { AmazonS3ClientBuilder b = AmazonS3Client.builder(); - b.withCredentials(parameters.getCredentialSet()); - b.withClientConfiguration(awsConf); - b.withPathStyleAccessEnabled(parameters.isPathStyleAccess()); + configureBasicParams(b, awsConf, parameters); + + // endpoint set up is a PITA + AwsClientBuilder.EndpointConfiguration epr + = createEndpointConfiguration(parameters.getEndpoint(), + awsConf, getConf().getTrimmed(AWS_REGION)); + configureEndpoint(b, epr); + final AmazonS3 client = b.build(); + return client; + } + + /** + * A method to configure basic AmazonS3Builder parameters. + * + * @param builder Instance of AmazonS3Builder used. + * @param awsConf ClientConfiguration used. + * @param parameters Parameters used to set in the builder. + */ + private void configureBasicParams(AmazonS3Builder builder, + ClientConfiguration awsConf, S3ClientCreationParameters parameters) { + builder.withCredentials(parameters.getCredentialSet()); + builder.withClientConfiguration(awsConf); + builder.withPathStyleAccessEnabled(parameters.isPathStyleAccess()); if (parameters.getMetrics() != null) { - b.withMetricsCollector( + builder.withMetricsCollector( new AwsStatisticsCollector(parameters.getMetrics())); } if (parameters.getRequestHandlers() != null) { - b.withRequestHandlers( + builder.withRequestHandlers( parameters.getRequestHandlers().toArray(new RequestHandler2[0])); } if (parameters.getMonitoringListener() != null) { - b.withMonitoringListener(parameters.getMonitoringListener()); + builder.withMonitoringListener(parameters.getMonitoringListener()); } - // endpoint set up is a PITA - AwsClientBuilder.EndpointConfiguration epr - = createEndpointConfiguration(parameters.getEndpoint(), - awsConf, getConf().getTrimmed(AWS_REGION)); + } + + /** + * A method to configure endpoint and Region for an AmazonS3Builder. + * + * @param builder Instance of AmazonS3Builder used. + * @param epr EndpointConfiguration used to set in builder. + */ + private void configureEndpoint( + AmazonS3Builder builder, + AmazonS3Builder.EndpointConfiguration epr) { if (epr != null) { // an endpoint binding was constructed: use it. - b.withEndpointConfiguration(epr); + builder.withEndpointConfiguration(epr); } else { // no idea what the endpoint is, so tell the SDK // to work it out at the cost of an extra HEAD request - b.withForceGlobalBucketAccessEnabled(true); + builder.withForceGlobalBucketAccessEnabled(true); // HADOOP-17771 force set the region so the build process doesn't halt. String region = getConf().getTrimmed(AWS_REGION, AWS_S3_CENTRAL_REGION); LOG.debug("fs.s3a.endpoint.region=\"{}\"", region); @@ -171,7 +272,7 @@ protected AmazonS3 buildAmazonS3Client( // there's either an explicit region or we have fallen back // to the central one. LOG.debug("Using default endpoint; setting region to {}", region); - b.setRegion(region); + builder.setRegion(region); } else { // no region. 
// allow this if people really want it; it is OK to rely on this @@ -180,8 +281,6 @@ protected AmazonS3 buildAmazonS3Client( LOG.debug(SDK_REGION_CHAIN_IN_USE); } } - final AmazonS3 client = b.build(); - return client; } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java index 113e6f4de2abd..5a4f5514d50b1 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java @@ -90,6 +90,7 @@ public class Listing extends AbstractStoreOperation { private static final Logger LOG = S3AFileSystem.LOG; + private final boolean isCSEEnabled; static final FileStatusAcceptor ACCEPT_ALL_BUT_S3N = new AcceptAllButS3nDirs(); @@ -97,9 +98,10 @@ public class Listing extends AbstractStoreOperation { private final ListingOperationCallbacks listingOperationCallbacks; public Listing(ListingOperationCallbacks listingOperationCallbacks, - StoreContext storeContext) { + StoreContext storeContext) { super(storeContext); this.listingOperationCallbacks = listingOperationCallbacks; + this.isCSEEnabled = storeContext.isCSEEnabled(); } /** @@ -687,7 +689,7 @@ private boolean buildNextStatusBatch(S3ListResult objects) { S3AFileStatus status = createFileStatus(keyPath, summary, listingOperationCallbacks.getDefaultBlockSize(keyPath), getStoreContext().getUsername(), - summary.getETag(), null); + summary.getETag(), null, isCSEEnabled); LOG.debug("Adding: {}", status); stats.add(status); added++; @@ -961,7 +963,7 @@ public AcceptFilesOnly(Path qualifiedPath) { public boolean accept(Path keyPath, S3ObjectSummary summary) { return !keyPath.equals(qualifiedPath) && !summary.getKey().endsWith(S3N_FOLDER_SUFFIX) - && !objectRepresentsDirectory(summary.getKey(), summary.getSize()); + && !objectRepresentsDirectory(summary.getKey()); } /** @@ -1049,6 +1051,7 @@ public boolean accept(FileStatus status) { } } + @SuppressWarnings("unchecked") public static RemoteIterator toLocatedFileStatusIterator( RemoteIterator iterator) { return (RemoteIterator < LocatedFileStatus >) iterator; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java index 5ba39aae2e29a..ddb933d6eb9a7 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java @@ -155,6 +155,9 @@ class S3ABlockOutputStream extends OutputStream implements private static final LogExactlyOnce WARN_ON_SYNCABLE = new LogExactlyOnce(LOG); + /** is client side encryption enabled? */ + private final boolean isCSEEnabled; + /** * An S3A output stream which uploads partitions in a separate pool of * threads; different {@link S3ADataBlocks.BlockFactory} @@ -189,6 +192,7 @@ class S3ABlockOutputStream extends OutputStream implements LOG.debug("Put tracker requests multipart upload"); initMultipartUpload(); } + this.isCSEEnabled = builder.isCSEEnabled; } /** @@ -307,29 +311,34 @@ public synchronized void write(byte[] source, int offset, int len) // of capacity // Trigger an upload then process the remainder. LOG.debug("writing more data than block has capacity -triggering upload"); - uploadCurrentBlock(); + uploadCurrentBlock(false); // tail recursion is mildly expensive, but given buffer sizes must be MB. 
// it's unlikely to recurse very deeply. this.write(source, offset + written, len - written); } else { - if (remainingCapacity == 0) { + if (remainingCapacity == 0 && !isCSEEnabled) { // the whole buffer is done, trigger an upload - uploadCurrentBlock(); + uploadCurrentBlock(false); } } } /** * Start an asynchronous upload of the current block. + * + * @param isLast true, if part being uploaded is last and client side + * encryption is enabled. * @throws IOException Problems opening the destination for upload, - * initializing the upload, or if a previous operation has failed. + * initializing the upload, or if a previous operation + * has failed. */ - private synchronized void uploadCurrentBlock() throws IOException { + private synchronized void uploadCurrentBlock(boolean isLast) + throws IOException { Preconditions.checkState(hasActiveBlock(), "No active block"); LOG.debug("Writing block # {}", blockCount); initMultipartUpload(); try { - multiPartUpload.uploadBlockAsync(getActiveBlock()); + multiPartUpload.uploadBlockAsync(getActiveBlock(), isLast); bytesSubmitted += getActiveBlock().dataSize(); } finally { // set the block to null, so the next write will create a new block. @@ -389,8 +398,9 @@ public void close() throws IOException { // PUT the final block if (hasBlock && (block.hasData() || multiPartUpload.getPartsSubmitted() == 0)) { - //send last part - uploadCurrentBlock(); + // send last part and set the value of isLastPart to true. + // Necessary to set this "true" in case of client side encryption. + uploadCurrentBlock(true); } // wait for the partial uploads to finish final List partETags = @@ -760,7 +770,8 @@ public void maybeRethrowUploadFailure() throws IOException { * @throws IOException upload failure * @throws PathIOException if too many blocks were written */ - private void uploadBlockAsync(final S3ADataBlocks.DataBlock block) + private void uploadBlockAsync(final S3ADataBlocks.DataBlock block, + Boolean isLast) throws IOException { LOG.debug("Queueing upload of {} for upload {}", block, uploadId); Preconditions.checkNotNull(uploadId, "Null uploadId"); @@ -781,6 +792,7 @@ private void uploadBlockAsync(final S3ADataBlocks.DataBlock block) uploadData.getUploadStream(), uploadData.getFile(), 0L); + request.setLastPart(isLast); } catch (SdkBaseException aws) { // catch and translate IOException e = translateException("upload", key, aws); @@ -1042,6 +1054,9 @@ public static final class BlockOutputStreamBuilder { /** Should Syncable calls be downgraded? */ private boolean downgradeSyncableExceptions; + /** is Client side Encryption enabled? */ + private boolean isCSEEnabled; + private BlockOutputStreamBuilder() { } @@ -1157,5 +1172,15 @@ public BlockOutputStreamBuilder withDowngradeSyncableExceptions( downgradeSyncableExceptions = value; return this; } + + /** + * Set builder value. 
+ * @param value new value + * @return the builder + */ + public BlockOutputStreamBuilder withCSEEnabled(boolean value) { + isCSEEnabled = value; + return this; + } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index e30ad5c084245..75f67a09f6483 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -52,6 +52,7 @@ import com.amazonaws.AmazonServiceException; import com.amazonaws.SdkBaseException; import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.Headers; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.CopyObjectRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; @@ -80,6 +81,7 @@ import com.amazonaws.event.ProgressListener; import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; import org.apache.hadoop.fs.store.audit.ActiveThreadSpanSource; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @@ -212,6 +214,7 @@ import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletionIgnoringExceptions; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_PADDING_LENGTH; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DELETE_CONSIDERED_IDEMPOTENT; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404; @@ -356,6 +359,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private AuditManagerS3A auditManager = AuditIntegration.stubAuditManager(); + /** + * Is this S3A FS instance using S3 client side encryption? + */ + private boolean isCSEEnabled; + /** Add any deprecated keys. */ @SuppressWarnings("deprecation") private static void addDeprecatedKeys() { @@ -413,12 +421,16 @@ public void initialize(URI name, Configuration originalConf) // DT Bindings may override this setEncryptionSecrets(new EncryptionSecrets( getEncryptionAlgorithm(bucket, conf), - getServerSideEncryptionKey(bucket, getConf()))); + getS3EncryptionKey(bucket, getConf()))); invoker = new Invoker(new S3ARetryPolicy(getConf()), onRetry); instrumentation = new S3AInstrumentation(uri); initializeStatisticsBinding(); - + // If CSE-KMS method is set then CSE is enabled. + isCSEEnabled = S3AUtils.lookupPassword(conf, + SERVER_SIDE_ENCRYPTION_ALGORITHM, null) != null; + LOG.debug("Client Side Encryption enabled: {}", isCSEEnabled); + setCSEGauge(); // Username is the current user at the time the FS was instantiated. owner = UserGroupInformation.getCurrentUser(); username = owner.getShortUserName(); @@ -508,6 +520,10 @@ public void initialize(URI name, Configuration originalConf) blockFactory = S3ADataBlocks.createFactory(this, blockOutputBuffer); blockOutputActiveBlocks = intOption(conf, FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1); + // If CSE is enabled, do multipart uploads serially. 
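+    // With client side encryption each part must be encrypted and uploaded
+    // in sequence, with the final part flagged, so keep a single active block.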
+ if (isCSEEnabled) { + blockOutputActiveBlocks = 1; + } LOG.debug("Using S3ABlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks); @@ -552,7 +568,22 @@ public void initialize(URI name, Configuration originalConf) stopAllServices(); throw e; } + } + /** + * Set the client side encryption gauge to 0 or 1, indicating if CSE is + * enabled through the gauge or not. + */ + private void setCSEGauge() { + IOStatisticsStore ioStatisticsStore = + (IOStatisticsStore) getIOStatistics(); + if (isCSEEnabled) { + ioStatisticsStore + .setGauge(CLIENT_SIDE_ENCRYPTION_ENABLED.getSymbol(), 1L); + } else { + ioStatisticsStore + .setGauge(CLIENT_SIDE_ENCRYPTION_ENABLED.getSymbol(), 0L); + } } /** @@ -1151,7 +1182,7 @@ public ChangeDetectionPolicy getChangeDetectionPolicy() { * Get the encryption algorithm of this endpoint. * @return the encryption algorithm. */ - public S3AEncryptionMethods getServerSideEncryptionAlgorithm() { + public S3AEncryptionMethods getS3EncryptionAlgorithm() { return encryptionSecrets.getEncryptionMethod(); } @@ -1492,7 +1523,7 @@ private S3ObjectAttributes createObjectAttributes( return new S3ObjectAttributes(bucket, f, pathToKey(f), - getServerSideEncryptionAlgorithm(), + getS3EncryptionAlgorithm(), encryptionSecrets.getEncryptionKey(), eTag, versionId, @@ -1618,7 +1649,8 @@ private FSDataOutputStream innerCreateFile(Path path, .withDowngradeSyncableExceptions( getConf().getBoolean( DOWNGRADE_SYNCABLE_EXCEPTIONS, - DOWNGRADE_SYNCABLE_EXCEPTIONS_DEFAULT)); + DOWNGRADE_SYNCABLE_EXCEPTIONS_DEFAULT)) + .withCSEEnabled(isCSEEnabled); return new FSDataOutputStream( new S3ABlockOutputStream(builder), null); @@ -3670,7 +3702,14 @@ S3AFileStatus s3GetFileStatus(final Path path, // look for the simple file ObjectMetadata meta = getObjectMetadata(key); LOG.debug("Found exact file: normal file {}", key); - return new S3AFileStatus(meta.getContentLength(), + long contentLength = meta.getContentLength(); + // check if CSE is enabled, then strip padded length. 
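+      // CSE adds a constant CSE_PADDING_LENGTH (16 bytes) to the stored
+      // object, so subtract it to report the unencrypted length.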
+ if (isCSEEnabled + && meta.getUserMetaDataOf(Headers.CRYPTO_CEK_ALGORITHM) != null + && contentLength >= CSE_PADDING_LENGTH) { + contentLength -= CSE_PADDING_LENGTH; + } + return new S3AFileStatus(contentLength, dateToLong(meta.getLastModified()), path, getDefaultBlockSize(path), @@ -4272,7 +4311,7 @@ void finishedWrite(String key, long length, String eTag, String versionId, key, length, eTag, versionId); Path p = keyToQualifiedPath(key); Preconditions.checkArgument(length >= 0, "content length is negative"); - final boolean isDir = objectRepresentsDirectory(key, length); + final boolean isDir = objectRepresentsDirectory(key); // kick off an async delete CompletableFuture deletion; if (!keepDirectoryMarkers(p)) { @@ -4452,9 +4491,9 @@ public String toString() { sb.append(", blockSize=").append(getDefaultBlockSize()); } sb.append(", multiPartThreshold=").append(multiPartThreshold); - if (getServerSideEncryptionAlgorithm() != null) { - sb.append(", serverSideEncryptionAlgorithm='") - .append(getServerSideEncryptionAlgorithm()) + if (getS3EncryptionAlgorithm() != null) { + sb.append(", s3EncryptionAlgorithm='") + .append(getS3EncryptionAlgorithm()) .append('\''); } if (blockFactory != null) { @@ -4480,6 +4519,7 @@ public String toString() { .append(getInstrumentation().toString()) .append("}"); } + sb.append(", ClientSideEncryption=").append(isCSEEnabled); sb.append('}'); return sb.toString(); } @@ -5090,8 +5130,9 @@ public boolean hasPathCapability(final Path path, final String capability) return isMagicCommitEnabled(); case SelectConstants.S3_SELECT_CAPABILITY: - // select is only supported if enabled - return SelectBinding.isSelectEnabled(getConf()); + // select is only supported if enabled and client side encryption is + // disabled. + return !isCSEEnabled && SelectBinding.isSelectEnabled(getConf()); case CommonPathCapabilities.FS_CHECKSUMS: // capability depends on FS configuration @@ -5099,8 +5140,10 @@ public boolean hasPathCapability(final Path path, final String capability) ETAG_CHECKSUM_ENABLED_DEFAULT); case CommonPathCapabilities.ABORTABLE_STREAM: - case CommonPathCapabilities.FS_MULTIPART_UPLOADER: return true; + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: + // client side encryption doesn't support multipart uploader. 
+ return !isCSEEnabled; // this client is safe to use with buckets // containing directory markers anywhere in @@ -5236,7 +5279,7 @@ private FSDataInputStream select(final Path source, */ private void requireSelectSupport(final Path source) throws UnsupportedOperationException { - if (!SelectBinding.isSelectEnabled(getConf())) { + if (!isCSEEnabled && !SelectBinding.isSelectEnabled(getConf())) { throw new UnsupportedOperationException( SelectConstants.SELECT_UNSUPPORTED); @@ -5359,6 +5402,10 @@ public CompletableFuture openFileWithOptions( public S3AMultipartUploaderBuilder createMultipartUploader( final Path basePath) throws IOException { + if(isCSEEnabled) { + throw new UnsupportedOperationException("Multi-part uploader not " + + "supported for Client side encryption."); + } final Path path = makeQualified(basePath); try (AuditSpan span = entryPoint(MULTIPART_UPLOAD_INSTANTIATED, path)) { StoreContext ctx = createStoreContext(); @@ -5397,6 +5444,7 @@ public StoreContext createStoreContext() { .setContextAccessors(new ContextAccessorsImpl()) .setTimeProvider(getTtlTimeProvider()) .setAuditor(getAuditor()) + .setEnableCSE(isCSEEnabled) .build(); } @@ -5466,4 +5514,12 @@ public RequestFactory getRequestFactory() { return S3AFileSystem.this.getRequestFactory(); } } + + /** + * a method to know if Client side encryption is enabled or not. + * @return a boolean stating if CSE is enabled. + */ + public boolean isCSEEnabled() { + return isCSEEnabled; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index a897a0a2bb66c..8f8bf684a5078 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -87,6 +87,7 @@ import static org.apache.commons.lang3.StringUtils.isEmpty; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_PADDING_LENGTH; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.translateDeleteException; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; import static org.apache.hadoop.util.functional.RemoteIterators.filteringRemoteIterator; @@ -521,6 +522,7 @@ public static String stringify(AmazonS3Exception e) { * @param owner owner of the file * @param eTag S3 object eTag or null if unavailable * @param versionId S3 object versionId or null if unavailable + * @param isCSEEnabled is client side encryption enabled? * @return a status entry */ public static S3AFileStatus createFileStatus(Path keyPath, @@ -528,10 +530,15 @@ public static S3AFileStatus createFileStatus(Path keyPath, long blockSize, String owner, String eTag, - String versionId) { + String versionId, + boolean isCSEEnabled) { long size = summary.getSize(); + // check if cse is enabled; strip out constant padding length. + if (isCSEEnabled && size >= CSE_PADDING_LENGTH) { + size -= CSE_PADDING_LENGTH; + } return createFileStatus(keyPath, - objectRepresentsDirectory(summary.getKey(), size), + objectRepresentsDirectory(summary.getKey()), size, summary.getLastModified(), blockSize, owner, eTag, versionId); } @@ -572,14 +579,11 @@ private static S3AFileStatus createFileStatus(Path keyPath, boolean isDir, /** * Predicate: does the object represent a directory?. 
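+   * When client side encryption is enabled, directory markers may be padded
+   * to a non-zero length, so only the trailing "/" is checked.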
* @param name object name - * @param size object size * @return true if it meets the criteria for being an object */ - public static boolean objectRepresentsDirectory(final String name, - final long size) { + public static boolean objectRepresentsDirectory(final String name) { return !name.isEmpty() - && name.charAt(name.length() - 1) == '/' - && size == 0L; + && name.charAt(name.length() - 1) == '/'; } /** @@ -1564,7 +1568,7 @@ static void patchSecurityCredentialProviders(Configuration conf) { } /** - * Get any SSE key from a configuration/credential provider. + * Get any SSE/CSE key from a configuration/credential provider. * This operation handles the case where the option has been * set in the provider or configuration to the option * {@code OLD_S3A_SERVER_SIDE_ENCRYPTION_KEY}. @@ -1574,7 +1578,7 @@ static void patchSecurityCredentialProviders(Configuration conf) { * @return the encryption key or "" * @throws IllegalArgumentException bad arguments. */ - public static String getServerSideEncryptionKey(String bucket, + public static String getS3EncryptionKey(String bucket, Configuration conf) { try { return lookupPassword(bucket, conf, SERVER_SIDE_ENCRYPTION_KEY); @@ -1585,7 +1589,7 @@ public static String getServerSideEncryptionKey(String bucket, } /** - * Get the server-side encryption algorithm. + * Get the server-side encryption or client side encryption algorithm. * This includes validation of the configuration, checking the state of * the encryption key given the chosen algorithm. * @@ -1597,22 +1601,23 @@ public static String getServerSideEncryptionKey(String bucket, */ public static S3AEncryptionMethods getEncryptionAlgorithm(String bucket, Configuration conf) throws IOException { - S3AEncryptionMethods sse = S3AEncryptionMethods.getMethod( + S3AEncryptionMethods encryptionMethod = S3AEncryptionMethods.getMethod( lookupPassword(bucket, conf, SERVER_SIDE_ENCRYPTION_ALGORITHM)); - String sseKey = getServerSideEncryptionKey(bucket, conf); - int sseKeyLen = StringUtils.isBlank(sseKey) ? 0 : sseKey.length(); - String diagnostics = passwordDiagnostics(sseKey, "key"); - switch (sse) { + String encryptionKey = getS3EncryptionKey(bucket, conf); + int encryptionKeyLen = + StringUtils.isBlank(encryptionKey) ? 
0 : encryptionKey.length(); + String diagnostics = passwordDiagnostics(encryptionKey, "key"); + switch (encryptionMethod) { case SSE_C: LOG.debug("Using SSE-C with {}", diagnostics); - if (sseKeyLen == 0) { + if (encryptionKeyLen == 0) { throw new IOException(SSE_C_NO_KEY_ERROR); } break; case SSE_S3: - if (sseKeyLen != 0) { + if (encryptionKeyLen != 0) { throw new IOException(SSE_S3_WITH_KEY_ERROR + " (" + diagnostics + ")"); } @@ -1623,12 +1628,17 @@ public static S3AEncryptionMethods getEncryptionAlgorithm(String bucket, diagnostics); break; + case CSE_KMS: + LOG.debug("Using CSE-KMS with {}", + diagnostics); + break; + case NONE: default: LOG.debug("Data is unencrypted"); break; } - return sse; + return encryptionMethod; } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java index 7890e2d946b76..fb28a40c3b8a6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java @@ -577,7 +577,14 @@ public enum Statistic { AUDIT_REQUEST_EXECUTION( AuditStatisticNames.AUDIT_REQUEST_EXECUTION, "AWS request made", - TYPE_COUNTER); + TYPE_COUNTER), + + /* Client side encryption gauge */ + CLIENT_SIDE_ENCRYPTION_ENABLED( + "client_side_encryption_enabled", + "gauge to indicate if client side encryption is enabled", + TYPE_GAUGE + ); /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java index 8c39aa4632d37..17394b701e6f8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java @@ -301,8 +301,15 @@ private Map retrieveHeaders( md.getContentEncoding()); maybeSetHeader(headers, XA_CONTENT_LANGUAGE, md.getContentLanguage()); - maybeSetHeader(headers, XA_CONTENT_LENGTH, - md.getContentLength()); + // If CSE is enabled, use the unencrypted content length. + if (md.getUserMetaDataOf(Headers.CRYPTO_CEK_ALGORITHM) != null + && md.getUserMetaDataOf(Headers.UNENCRYPTED_CONTENT_LENGTH) != null) { + maybeSetHeader(headers, XA_CONTENT_LENGTH, + md.getUserMetaDataOf(Headers.UNENCRYPTED_CONTENT_LENGTH)); + } else { + maybeSetHeader(headers, XA_CONTENT_LENGTH, + md.getContentLength()); + } maybeSetHeader(headers, XA_CONTENT_MD5, md.getContentMD5()); maybeSetHeader(headers, XA_CONTENT_RANGE, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java index cf962b87a4176..51b1bf60a2fc6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java @@ -126,4 +126,12 @@ private InternalConstants() { */ public static final String AWS_REGION_SYSPROP = "aws.region"; + /** + * S3 client side encryption adds padding to the content length of constant + * length of 16 bytes (at the moment, since we have only 1 content + * encryption algorithm). Use this to subtract while listing the content + * length when certain conditions are met. 
+ */ + public static final int CSE_PADDING_LENGTH = 16; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java index efc789ba322fa..b7e0f353fabb0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java @@ -638,7 +638,7 @@ private Path copySourceAndUpdateTracker( copyResult = callbacks.copyFile(srcKey, destinationKey, srcAttributes, readContext); } - if (objectRepresentsDirectory(srcKey, len)) { + if (objectRepresentsDirectory(srcKey)) { renameTracker.directoryMarkerCopied( sourceFile, destination, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java index ac29780dcbaec..d60e1a2cd289e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -129,6 +129,9 @@ public class StoreContext implements ActiveThreadSpanSource { /** Operation Auditor. */ private final AuditSpanSource auditor; + /** Is client side encryption enabled? */ + private final boolean isCSEEnabled; + /** * Instantiate. */ @@ -150,7 +153,8 @@ public class StoreContext implements ActiveThreadSpanSource { final boolean useListV1, final ContextAccessors contextAccessors, final ITtlTimeProvider timeProvider, - final AuditSpanSource auditor) { + final AuditSpanSource auditor, + final boolean isCSEEnabled) { this.fsURI = fsURI; this.bucket = bucket; this.configuration = configuration; @@ -172,6 +176,7 @@ public class StoreContext implements ActiveThreadSpanSource { this.contextAccessors = contextAccessors; this.timeProvider = timeProvider; this.auditor = auditor; + this.isCSEEnabled = isCSEEnabled; } public URI getFsURI() { @@ -429,4 +434,12 @@ public AuditSpan getActiveAuditSpan() { public RequestFactory getRequestFactory() { return contextAccessors.getRequestFactory(); } + + /** + * return if the store context have client side encryption enabled. + * @return boolean indicating if CSE is enabled or not. + */ + public boolean isCSEEnabled() { + return isCSEEnabled; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java index 468af1bb77c8b..d4021a731cca2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java @@ -73,6 +73,8 @@ public class StoreContextBuilder { private AuditSpanSource auditor; + private boolean isCSEEnabled; + public StoreContextBuilder setFsURI(final URI fsURI) { this.fsURI = fsURI; return this; @@ -180,6 +182,17 @@ public StoreContextBuilder setAuditor( return this; } + /** + * set is client side encryption boolean value. + * @param value value indicating if client side encryption is enabled or not. + * @return builder instance. 
+ */ + public StoreContextBuilder setEnableCSE( + boolean value) { + isCSEEnabled = value; + return this; + } + @SuppressWarnings("deprecation") public StoreContext build() { return new StoreContext(fsURI, @@ -199,6 +212,7 @@ public StoreContext build() { useListV1, contextAccessors, timeProvider, - auditor); + auditor, + isCSEEnabled); } } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md index d869705ad531e..888ed8e211a9f 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md @@ -21,9 +21,13 @@ ## Introduction The S3A filesystem client supports Amazon S3's Server Side Encryption -for at-rest data encryption. -You should to read up on the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -for S3 Server Side Encryption for up to date information on the encryption mechanisms. +and Client Side Encryption for encrypting data at-rest. + + +For up to date information on the encryption mechanisms, read: + +* [Protecting data using server-side encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +* [Protecting data using client-side encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingClientSideEncryption.html) @@ -32,18 +36,21 @@ Any new file written will be encrypted with this encryption configuration. When the S3A client reads a file, S3 will attempt to decrypt it using the mechanism and keys with which the file was encrypted. -* It is **NOT** advised to mix and match encryption types in a bucket +* It is **NOT** advised to mix and match encryption types in a bucket. * It is much simpler and safer to encrypt with just one type and key per bucket. * You can use AWS bucket policies to mandate encryption rules for a bucket. * You can use S3A per-bucket configuration to ensure that S3A clients use encryption policies consistent with the mandated rules. -* You can use S3 Default Encryption to encrypt data without needing to +* You can use S3 Default Encryption in AWS console to encrypt data without needing to set anything in the client. * Changing the encryption options on the client does not change how existing files were encrypted, except when the files are renamed. -* For all mechanisms other than SSE-C, clients do not need any configuration +* For all mechanisms other than SSE-C and CSE-KMS, clients do not need any configuration options set in order to read encrypted data: it is all automatically handled in S3 itself. +* Encryption options and secrets are collected by [S3A Delegation Tokens](delegation_tokens.html) and passed to workers during job submission. +* Encryption options and secrets MAY be stored in JCEKS files or any other Hadoop credential provider service. + This allows for more secure storage than XML files, including password protection of the secrets. ## How data is encrypted @@ -53,8 +60,7 @@ to encrypt the data as it saved to S3. It remains encrypted on S3 until deleted: clients cannot change the encryption attributes of an object once uploaded. The Amazon AWS SDK also offers client-side encryption, in which all the encoding -and decoding of data is performed on the client. This is *not* supported by -the S3A client. +and decoding of data is performed on the client. 
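+
+Regardless of which mechanism is used, the encryption options and secrets can
+be kept in a Hadoop credential provider rather than in XML files, as noted in
+the list above. The following is a minimal sketch of how a client resolves
+such a secret; the JCEKS path used here is only a placeholder:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+
+public class EncryptionKeyLookup {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    // Placeholder keystore; it could be created with, for example:
+    //   hadoop credential create fs.s3a.server-side-encryption.key \
+    //     -provider jceks://file/tmp/s3a-encryption.jceks
+    conf.set("hadoop.security.credential.provider.path",
+        "jceks://file/tmp/s3a-encryption.jceks");
+    // getPassword() consults credential providers before falling back to
+    // any value set in the XML configuration.
+    char[] key = conf.getPassword("fs.s3a.server-side-encryption.key");
+    System.out.println("Encryption key resolved: " + (key != null));
+  }
+}
+```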
The server-side "SSE" encryption is performed with symmetric AES256 encryption; S3 offers different mechanisms for actually defining the key to use. @@ -70,6 +76,53 @@ by Amazon's Key Management Service, a key referenced by name in the uploading cl * SSE-C : the client specifies an actual base64 encoded AES-256 key to be used to encrypt and decrypt the data. +Encryption options + +| type | encryption | config on write | config on read | +|-------|---------|-----------------|----------------| +| `SSE-S3` | server side, AES256 | encryption algorithm | none | +| `SSE-KMS` | server side, KMS key | key used to encrypt/decrypt | none | +| `SSE-C` | server side, custom key | encryption algorithm and secret | encryption algorithm and secret | +| `CSE-KMS` | client side, KMS key | encryption algorithm and key ID | encryption algorithm | + +With server-side encryption, the data is uploaded to S3 unencrypted (but wrapped by the HTTPS +encryption channel). +The data is encrypted in the S3 store and decrypted when it's being retrieved. + +A server side algorithm can be enabled by default for a bucket, so that +whenever data is uploaded unencrypted a default encryption algorithm is added. +When data is encrypted with S3-SSE or SSE-KMS it is transparent to all clients +downloading the data. +SSE-C is different in that every client must know the secret key needed to decypt the data. + +Working with SSE-C data is harder because every client must be configured to +use the algorithm and supply the key. In particular, it is very hard to mix +SSE-C encrypted objects in the same S3 bucket with objects encrypted with +other algorithms or unencrypted; The S3A client (and other applications) get +very confused. + +KMS-based key encryption is powerful as access to a key can be restricted to +specific users/IAM roles. However, use of the key is billed and can be +throttled. Furthermore as a client seeks around a file, the KMS key *may* be +used multiple times. + +S3 Client side encryption (CSE-KMS) is an experimental feature added in July +2021. + +This encrypts the data on the client, before transmitting to S3, where it is +stored encrypted. The data is unencrypted after downloading when it is being +read back. + +In CSE-KMS, the ID of an AWS-KMS key is provided to the S3A client; +the client communicates with AWS-KMS to request a new encryption key, which +KMS returns along with the same key encrypted with the KMS key. +The S3 client encrypts the payload *and* attaches the KMS-encrypted version +of the key as a header to the object. + +When downloading data, this header is extracted, passed to AWS KMS, and, +if the client has the appropriate permissions, the symmetric key is +retrieved. +This key is then used to decode the data. ## S3 Default Encryption @@ -110,7 +163,7 @@ To learn more, refer to [Protecting Data Using Server-Side Encryption with Amazon S3-Managed Encryption Keys (SSE-S3) in AWS documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -### SSE-KMS: Amazon S3-KMS Managed Encryption Keys +### SSE-KMS: Server-Encryption with KMS Managed Encryption Keys Amazon offers a pay-per-use key management service, [AWS KMS](https://aws.amazon.com/documentation/kms/). @@ -419,7 +472,82 @@ the data, so guaranteeing that access to thee data can be read by everyone granted access to that key, and nobody without access to it. -### Use rename() to encrypt files with new keys +### Using `hadoop fs -getfattr` to view encryption information. 
+ +The S3A client retrieves all HTTP headers from an object and returns +them in the "XAttr" list of attributed, prefixed with `header.`. +This makes them retrievable in the `getXAttr()` API calls, which +is available on the command line through the `hadoop fs -getfattr -d` command. + +This makes viewing the encryption headers of a file straightforward. + +Here is an example of the operation invoked on a file where the client is using CSE-KMS: +``` +bin/hadoop fs -getfattr -d s3a://test-london/file2 + +2021-07-14 12:59:01,554 [main] WARN s3.AmazonS3EncryptionClientV2 (AmazonS3EncryptionClientV2.java:warnOnLegacyCryptoMode(409)) - The S3 Encryption Client is configured to read encrypted data with legacy encryption modes through the CryptoMode setting. If you don't have objects encrypted with these legacy modes, you should disable support for them to enhance security. See https://docs.aws.amazon.com/general/latest/gr/aws_sdk_cryptography.html +2021-07-14 12:59:01,558 [main] WARN s3.AmazonS3EncryptionClientV2 (AmazonS3EncryptionClientV2.java:warnOnRangeGetsEnabled(401)) - The S3 Encryption Client is configured to support range get requests. Range gets do not provide authenticated encryption properties even when used with an authenticated mode (AES-GCM). See https://docs.aws.amazon.com/general/latest/gr/aws_sdk_cryptography.html +# file: s3a://test-london/file2 +header.Content-Length="0" +header.Content-Type="application/octet-stream" +header.ETag="63b3f4bd6758712c98f1be86afad095a" +header.Last-Modified="Wed Jul 14 12:56:06 BST 2021" +header.x-amz-cek-alg="AES/GCM/NoPadding" +header.x-amz-iv="ZfrgtxvcR41yNVkw" +header.x-amz-key-v2="AQIDAHjZrfEIkNG24/xy/pqPPLcLJulg+O5pLgbag/745OH4VQFuiRnSS5sOfqXJyIa4FOvNAAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM7b5GZzD4eAGTC6gaAgEQgDte9Et/dhR7LAMU/NaPUZSgXWN5pRnM4hGHBiRNEVrDM+7+iotSTKxFco+VdBAzwFZfHjn0DOoSZEukNw==" +header.x-amz-matdesc="{"aws:x-amz-cek-alg":"AES/GCM/NoPadding"}" +header.x-amz-server-side-encryption="AES256" +header.x-amz-tag-len="128" +header.x-amz-unencrypted-content-length="0" +header.x-amz-version-id="zXccFCB9eICszFgqv_paG1pzaUKY09Xa" +header.x-amz-wrap-alg="kms+context" +``` + +Analysis + +1. The WARN commands are the AWS SDK warning that because the S3A client uses +an encryption algorithm which seek() requires, the SDK considers it less +secure than the most recent algorithm(s). Ignore. + +* `header.x-amz-server-side-encryption="AES256"` : the file has been encrypted with S3-SSE. This is set up as the S3 default encryption, +so even when CSE is enabled, the data is doubly encrypted. +* `header.x-amz-cek-alg="AES/GCM/NoPadding`: client-side encrypted with the `"AES/GCM/NoPadding` algorithm. +* `header.x-amz-iv="ZfrgtxvcR41yNVkw"`: +* `header.x-amz-key-v2="AQIDAHjZrfEIkNG24/xy/pqPPLcLJulg+O5pLgbag/745OH4VQFuiRnSS5sOfqXJyIa4FOvNAAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM7b5GZzD4eAGTC6gaAgEQgDte9Et/dhR7LAMU/NaPUZSgXWN5pRnM4hGHBiRNEVrDM+7+iotSTKxFco+VdBAzwFZfHjn0DOoSZEukNw=="`: +* `header.x-amz-unencrypted-content-length="0"`: this is the length of the unencrypted data. The S3A client *DOES NOT* use this header; +* `header.x-amz-wrap-alg="kms+context"`: the algorithm used to encrypt the CSE key + it always removes 16 bytes from non-empty files when declaring the length. +* `header.x-amz-version-id="zXccFCB9eICszFgqv_paG1pzaUKY09Xa"`: the bucket is versioned; this is the version ID. 
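+
+The same headers can also be read programmatically through the `getXAttr()`
+and `getXAttrs()` calls mentioned above. A minimal sketch follows; the bucket
+and object names are placeholders:
+
+```java
+import java.nio.charset.StandardCharsets;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class ShowEncryptionHeaders {
+  public static void main(String[] args) throws Exception {
+    // Placeholder path: substitute an object you can read.
+    Path file = new Path("s3a://example-bucket/file2");
+    FileSystem fs = file.getFileSystem(new Configuration());
+    // Every object HTTP header is surfaced as an XAttr named "header.<name>".
+    for (Map.Entry<String, byte[]> e : fs.getXAttrs(file).entrySet()) {
+      if (e.getKey().startsWith("header.x-amz-")) {
+        System.out.println(e.getKey() + " = "
+            + new String(e.getValue(), StandardCharsets.UTF_8));
+      }
+    }
+  }
+}
+```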
+
+And a directory encrypted with S3-SSE only:
+
+```
+bin/hadoop fs -getfattr -d s3a://test-london/user/stevel/target/test/data/sOCOsNgEjv
+
+# file: s3a://test-london/user/stevel/target/test/data/sOCOsNgEjv
+header.Content-Length="0"
+header.Content-Type="application/x-directory"
+header.ETag="d41d8cd98f00b204e9800998ecf8427e"
+header.Last-Modified="Tue Jul 13 20:12:07 BST 2021"
+header.x-amz-server-side-encryption="AES256"
+header.x-amz-version-id="KcDOVmznIagWx3gP1HlDqcZvm1mFWZ2a"
+```
+
+A file with no encryption (on a bucket without versioning but with intelligent tiering):
+
+```
+bin/hadoop fs -getfattr -d s3a://landsat-pds/scene_list.gz
+
+# file: s3a://landsat-pds/scene_list.gz
+header.Content-Length="45603307"
+header.Content-Type="application/octet-stream"
+header.ETag="39c34d489777a595b36d0af5726007db"
+header.Last-Modified="Wed Aug 29 01:45:15 BST 2018"
+header.x-amz-storage-class="INTELLIGENT_TIERING"
+header.x-amz-version-id="null"
+```
+
+### Use `rename()` to encrypt files with new keys
 
 The encryption of an object is set when it is uploaded. If you want
 to encrypt an unencrypted file, or change the SEE-KMS key of a file, the only way to do
@@ -435,6 +563,91 @@ for reading as for writing, and you must supply that key for reading. There
 you need to copy one bucket to a different bucket, one with a different key.
 Use `distCp`for this, with per-bucket encryption policies.
 
+## Amazon S3 Client Side Encryption
+
+### Introduction
+Amazon S3 Client Side Encryption (S3-CSE) encrypts data on the client side
+before transmitting it to S3 storage. The same encrypted data is returned to
+the client on reads and decrypted on the client side.
+
+S3-CSE uses `AmazonS3EncryptionClientV2.java` as the AmazonS3 client; the
+encryption and decryption are done by the AWS SDK. As of July 2021, only the
+CSE-KMS method is supported.
+
+A key reason this feature (HADOOP-13887) has been unavailable for a long time
+is that the AWS S3 client pads uploaded objects with a 16 byte footer. This
+meant that files were shorter when read than when listed through any of the
+list API calls or getFileStatus(), which broke many applications, including
+anything seeking near the end of a file to read a footer, as ORC and Parquet
+do.
+
+There is now a workaround: compensate for the footer in listings when CSE is enabled.
+
+- When listing files and directories, 16 bytes are subtracted from the length
+of all non-empty objects (greater than or equal to 16 bytes).
+- Directory markers MAY be longer than 0 bytes.
+
+This "appears" to work, and it does in testing as of July 2021. However,
+the length of files listed through the S3A client is now going to be
+shorter than the length of the same files listed with other clients,
+including S3A clients where S3-CSE has not been enabled.
+
+### Features
+
+- Supports client side encryption with keys managed in AWS KMS.
+- Encryption settings are propagated into jobs through any issued delegation tokens.
+- Encryption information is stored as headers in the uploaded object.
+
+### Limitations
+
+- Performance will be reduced. All encryption and decryption is now done on
+  the client.
+- Writing files may be slower, as only a single block can be encrypted and
+  uploaded at a time.
+- The Multipart Uploader API is disabled.
+- S3 Select is not supported.
+- Multipart uploads are serial, and partSize must be a multiple of 16
+  bytes.
+- maximum message size in bytes that can be encrypted under this mode is + 2^36-32, or ~64G, due to the security limitation of AES/GCM as recommended by + NIST. + +### Setup +- Generate an AWS KMS Key ID from AWS console for your bucket, with same + region as the storage bucket. +- If already created, [view the kms key ID by these steps.](https://docs.aws.amazon.com/kms/latest/developerguide/find-cmk-id-arn.html) +- Set `fs.s3a.server-side-encryption-algorithm=CSE-KMS`. +- Set `fs.s3a.server-side-encryption.key=`. + +KMS_KEY_ID: + +Identifies the symmetric CMK that encrypts the data key. +To specify a CMK, use its key ID, key ARN, alias name, or alias ARN. When +using an alias name, prefix it with "alias/". To specify a CMK in a +different AWS account, you must use the key ARN or alias ARN. + +For example: +- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` +- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` +- Alias name: `alias/ExampleAlias` +- Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + +*Note:* If `fs.s3a.server-side-encryption-algorithm=CSE-KMS` is set, +`fs.s3a.server-side-encryption.key=` property must be set for +S3-CSE to work. + +```xml + + fs.s3a.server-side-encryption-algorithm + CSE-KMS + + + + fs.s3a.server-side-encryption.key + ${KMS_KEY_ID} + +``` ## Troubleshooting Encryption diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 30047edfb5c38..6cdb492d885fc 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -1157,6 +1157,284 @@ is used, no encryption is specified, or the SSE-C specified is incorrect. 2. A directory is encrypted with a SSE-C keyA and the user is trying to move a file using configured SSE-C keyB into that structure. +## S3 Client Side Encryption + +### Instruction file not found for S3 object + +Reading an unencrypted file would fail when read through CSE enabled client. 
+``` +java.lang.SecurityException: Instruction file not found for S3 object with bucket name: ap-south-cse, key: unencryptedData.txt + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleAE.decipher(S3CryptoModuleAE.java:190) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleAE.getObjectSecurely(S3CryptoModuleAE.java:136) + at com.amazonaws.services.s3.AmazonS3EncryptionClientV2.getObject(AmazonS3EncryptionClientV2.java:241) + at org.apache.hadoop.fs.s3a.S3AFileSystem$InputStreamCallbacksImpl.getObject(S3AFileSystem.java:1462) + at org.apache.hadoop.fs.s3a.S3AInputStream.lambda$reopen$0(S3AInputStream.java:217) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + at org.apache.hadoop.fs.s3a.S3AInputStream.reopen(S3AInputStream.java:216) + at org.apache.hadoop.fs.s3a.S3AInputStream.lambda$lazySeek$1(S3AInputStream.java:382) + at org.apache.hadoop.fs.s3a.Invoker.lambda$maybeRetry$3(Invoker.java:230) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + at org.apache.hadoop.fs.s3a.Invoker.lambda$maybeRetry$5(Invoker.java:354) + at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:414) + at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:350) + at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:228) + at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:272) + at org.apache.hadoop.fs.s3a.S3AInputStream.lazySeek(S3AInputStream.java:374) + at org.apache.hadoop.fs.s3a.S3AInputStream.read(S3AInputStream.java:493) + at java.io.DataInputStream.read(DataInputStream.java:100) + at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:94) + at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:68) + at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:129) + at org.apache.hadoop.fs.shell.Display$Cat.printToStdout(Display.java:101) + at org.apache.hadoop.fs.shell.Display$Cat.processPath(Display.java:96) + at org.apache.hadoop.fs.shell.Command.processPathInternal(Command.java:370) + at org.apache.hadoop.fs.shell.Command.processPaths(Command.java:333) + at org.apache.hadoop.fs.shell.Command.processPathArgument(Command.java:306) + at org.apache.hadoop.fs.shell.Command.processArgument(Command.java:288) + at org.apache.hadoop.fs.shell.Command.processArguments(Command.java:272) + at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:121) + at org.apache.hadoop.fs.shell.Command.run(Command.java:179) + at org.apache.hadoop.fs.FsShell.run(FsShell.java:327) + at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:81) + at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:95) + at org.apache.hadoop.fs.FsShell.main(FsShell.java:390) +``` +CSE enabled client should read encrypted data only. + +### CSE-KMS method requires KMS key ID + +KMS key ID is required for CSE-KMS to encrypt data, not providing one leads + to failure. + +``` +2021-07-07 11:33:04,550 WARN fs.FileSystem: Failed to initialize fileystem +s3a://ap-south-cse/: java.lang.IllegalArgumentException: CSE-KMS +method requires KMS key ID. Use fs.s3a.server-side-encryption.key property to set it. +-ls: CSE-KMS method requires KMS key ID. Use fs.s3a.server-side-encryption.key property to + set it. +``` + +set `fs.s3a.server-side-encryption.key=` generated through AWS console. + +### `com.amazonaws.services.kms.model.IncorrectKeyException` The key ID in the request does not identify a CMK that can perform this operation. + +KMS key ID used to PUT(encrypt) the data, must be the one used to GET the +data. 
+ ``` +cat: open s3a://ap-south-cse/encryptedData.txt at 0 on +s3a://ap-south-cse/encryptedData.txt: +com.amazonaws.services.kms.model.IncorrectKeyException: The key ID in the +request does not identify a CMK that can perform this operation. (Service: AWSKMS; +Status Code: 400; ErrorCode: IncorrectKeyException; +Request ID: da21aa8a-f00d-467c-94a0-32b627d32bc0; Proxy: null):IncorrectKeyException: +The key ID in the request does not identify a CMK that can perform this +operation. (Service: AWSKMS ; Status Code: 400; Error Code: IncorrectKeyException; +Request ID: da21aa8a-f00d-467c-94a0-32b627d32bc0; Proxy: null) +``` +Use the same KMS key ID used to upload data to download and read it as well. + +### `com.amazonaws.services.kms.model.NotFoundException` key/ does not exist + +Using a KMS key ID from a different region than the bucket used to store data + would lead to failure while uploading. + +``` +mkdir: PUT 0-byte object on testmkdir: +com.amazonaws.services.kms.model.NotFoundException: Key +'arn:aws:kms:ap-south-1:152813717728:key/' +does not exist (Service: AWSKMS; Status Code: 400; Error Code: NotFoundException; +Request ID: 279db85d-864d-4a38-9acd-d892adb504c0; Proxy: null):NotFoundException: +Key 'arn:aws:kms:ap-south-1:152813717728:key/' +does not exist(Service: AWSKMS; Status Code: 400; Error Code: NotFoundException; +Request ID: 279db85d-864d-4a38-9acd-d892adb504c0; Proxy: null) +``` +While generating the KMS Key ID make sure to generate it in the same region + as your bucket. + +### Unable to perform range get request: Range get support has been disabled + +If Range get is not supported for a CSE algorithm or is disabled: +``` +java.lang.SecurityException: Unable to perform range get request: Range get support has been disabled. See https://docs.aws.amazon.com/general/latest/gr/aws_sdk_cryptography.html + + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleAE.assertCanGetPartialObject(S3CryptoModuleAE.java:446) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleAE.getObjectSecurely(S3CryptoModuleAE.java:117) + at com.amazonaws.services.s3.AmazonS3EncryptionClientV2.getObject(AmazonS3EncryptionClientV2.java:241) + at org.apache.hadoop.fs.s3a.S3AFileSystem$InputStreamCallbacksImpl.getObject(S3AFileSystem.java:1462) + at org.apache.hadoop.fs.s3a.S3AInputStream.lambda$reopen$0(S3AInputStream.java:217) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + at org.apache.hadoop.fs.s3a.S3AInputStream.reopen(S3AInputStream.java:216) + at org.apache.hadoop.fs.s3a.S3AInputStream.lambda$lazySeek$1(S3AInputStream.java:382) + at org.apache.hadoop.fs.s3a.Invoker.lambda$maybeRetry$3(Invoker.java:230) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + at org.apache.hadoop.fs.s3a.Invoker.lambda$maybeRetry$5(Invoker.java:354) + at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:414) + at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:350) + at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:228) + at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:272) + at org.apache.hadoop.fs.s3a.S3AInputStream.lazySeek(S3AInputStream.java:374) + at org.apache.hadoop.fs.s3a.S3AInputStream.read(S3AInputStream.java:408) + at java.io.DataInputStream.readByte(DataInputStream.java:265) +``` +Range gets must be enabled for CSE to work. + +### WARNING: Range gets do not provide authenticated encryption properties even when used with an authenticated mode (AES-GCM). 
+ +The S3 Encryption Client is configured to support range get requests. This + warning would be shown everytime S3-CSE is used. +``` +2021-07-14 12:54:09,525 [main] WARN s3.AmazonS3EncryptionClientV2 +(AmazonS3EncryptionClientV2.java:warnOnRangeGetsEnabled(401)) - The S3 +Encryption Client is configured to support range get requests. Range gets do +not provide authenticated encryption properties even when used with an +authenticated mode (AES-GCM). See https://docs.aws.amazon.com/general/latest +/gr/aws_sdk_cryptography.html +``` +We can Ignore this warning since, range gets must be enabled for S3-CSE to +get data. + +### WARNING: If you don't have objects encrypted with these legacy modes, you should disable support for them to enhance security. + +The S3 Encryption Client is configured to read encrypted data with legacy +encryption modes through the CryptoMode setting, and we would see this +warning for all S3-CSE request. + +``` +2021-07-14 12:54:09,519 [main] WARN s3.AmazonS3EncryptionClientV2 +(AmazonS3EncryptionClientV2.java:warnOnLegacyCryptoMode(409)) - The S3 +Encryption Client is configured to read encrypted data with legacy +encryption modes through the CryptoMode setting. If you don't have objects +encrypted with these legacy modes, you should disable support for them to +enhance security. See https://docs.aws.amazon.com/general/latest/gr/aws_sdk_cryptography.html +``` +We can ignore this, since this CryptoMode setting(CryptoMode.AuthenticatedEncryption) +is required for range gets to work. + +### com.amazonaws.services.kms.model.InvalidKeyUsageException: You cannot generate a data key with an asymmetric CMK + +If you generated an Asymmetric CMK from AWS console then CSE-KMS won't be +able to generate unique data key for encryption. + +``` +Caused by: com.amazonaws.services.kms.model.InvalidKeyUsageException: +You cannot generate a data key with an asymmetric CMK +(Service: AWSKMS; Status Code: 400; Error Code: InvalidKeyUsageException; Request ID: 93609c15-e490-4035-8390-f4396f0d90bf; Proxy: null) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1819) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleServiceErrorResponse(AmazonHttpClient.java:1403) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1372) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1145) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:802) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:770) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:744) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:704) + at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:686) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:550) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:530) + at com.amazonaws.services.kms.AWSKMSClient.doInvoke(AWSKMSClient.java:7223) + at com.amazonaws.services.kms.AWSKMSClient.invoke(AWSKMSClient.java:7190) + at com.amazonaws.services.kms.AWSKMSClient.invoke(AWSKMSClient.java:7179) + at com.amazonaws.services.kms.AWSKMSClient.executeGenerateDataKey(AWSKMSClient.java:3482) + at com.amazonaws.services.kms.AWSKMSClient.generateDataKey(AWSKMSClient.java:3451) + at 
com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.buildContentCryptoMaterial(S3CryptoModuleBase.java:533) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.newContentCryptoMaterial(S3CryptoModuleBase.java:481) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.createContentCryptoMaterial(S3CryptoModuleBase.java:447) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.putObjectUsingMetadata(S3CryptoModuleBase.java:160) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.putObjectSecurely(S3CryptoModuleBase.java:156) + at com.amazonaws.services.s3.AmazonS3EncryptionClientV2.putObject(AmazonS3EncryptionClientV2.java:236) + at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$putObjectDirect$17(S3AFileSystem.java:2792) + at org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfSupplier(IOStatisticsBinding.java:604) + at org.apache.hadoop.fs.s3a.S3AFileSystem.putObjectDirect(S3AFileSystem.java:2789) + at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$createEmptyObject$33(S3AFileSystem.java:4440) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + ... 49 more +``` + +Generate a Symmetric Key in the same region as your S3 storage for CSE-KMS to +work. + +### com.amazonaws.services.kms.model.NotFoundException: Invalid keyId + +If the value in `fs.s3a.server-side-encryption.key` property, does not exist +/valid in AWS KMS CMK(Customer managed keys), then this error would be seen. + +``` +Caused by: com.amazonaws.services.kms.model.NotFoundException: Invalid keyId abc +(Service: AWSKMS; Status Code: 400; Error Code: NotFoundException; Request ID: 9d53552a-3d1b-47c8-984c-9a599d5c2391; Proxy: null) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1819) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleServiceErrorResponse(AmazonHttpClient.java:1403) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1372) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1145) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:802) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:770) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:744) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:704) + at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:686) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:550) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:530) + at com.amazonaws.services.kms.AWSKMSClient.doInvoke(AWSKMSClient.java:7223) + at com.amazonaws.services.kms.AWSKMSClient.invoke(AWSKMSClient.java:7190) + at com.amazonaws.services.kms.AWSKMSClient.invoke(AWSKMSClient.java:7179) + at com.amazonaws.services.kms.AWSKMSClient.executeGenerateDataKey(AWSKMSClient.java:3482) + at com.amazonaws.services.kms.AWSKMSClient.generateDataKey(AWSKMSClient.java:3451) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.buildContentCryptoMaterial(S3CryptoModuleBase.java:533) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.newContentCryptoMaterial(S3CryptoModuleBase.java:481) + at 
com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.createContentCryptoMaterial(S3CryptoModuleBase.java:447) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.putObjectUsingMetadata(S3CryptoModuleBase.java:160) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.putObjectSecurely(S3CryptoModuleBase.java:156) + at com.amazonaws.services.s3.AmazonS3EncryptionClientV2.putObject(AmazonS3EncryptionClientV2.java:236) + at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$putObjectDirect$17(S3AFileSystem.java:2792) + at org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfSupplier(IOStatisticsBinding.java:604) + at org.apache.hadoop.fs.s3a.S3AFileSystem.putObjectDirect(S3AFileSystem.java:2789) + at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$createEmptyObject$33(S3AFileSystem.java:4440) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + ... 49 more +``` + +Check if `fs.s3a.server-side-encryption.key` is set correctly and matches the +same on AWS console. + +### com.amazonaws.services.kms.model.AWSKMSException: User: is not authorized to perform : kms :GenerateDataKey on resource: + +User doesn't have authorization to the specific AWS KMS Key ID. +``` +Caused by: com.amazonaws.services.kms.model.AWSKMSException: +User: arn:aws:iam::152813717728:user/ is not authorized to perform: kms:GenerateDataKey on resource: +(Service: AWSKMS; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4ded9f1f-b245-4213-87fc-16cba7a1c4b9; Proxy: null) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1819) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleServiceErrorResponse(AmazonHttpClient.java:1403) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1372) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1145) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:802) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:770) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:744) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:704) + at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:686) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:550) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:530) + at com.amazonaws.services.kms.AWSKMSClient.doInvoke(AWSKMSClient.java:7223) + at com.amazonaws.services.kms.AWSKMSClient.invoke(AWSKMSClient.java:7190) + at com.amazonaws.services.kms.AWSKMSClient.invoke(AWSKMSClient.java:7179) + at com.amazonaws.services.kms.AWSKMSClient.executeGenerateDataKey(AWSKMSClient.java:3482) + at com.amazonaws.services.kms.AWSKMSClient.generateDataKey(AWSKMSClient.java:3451) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.buildContentCryptoMaterial(S3CryptoModuleBase.java:533) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.newContentCryptoMaterial(S3CryptoModuleBase.java:481) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.createContentCryptoMaterial(S3CryptoModuleBase.java:447) + at com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.putObjectUsingMetadata(S3CryptoModuleBase.java:160) + at 
com.amazonaws.services.s3.internal.crypto.v2.S3CryptoModuleBase.putObjectSecurely(S3CryptoModuleBase.java:156) + at com.amazonaws.services.s3.AmazonS3EncryptionClientV2.putObject(AmazonS3EncryptionClientV2.java:236) + at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$putObjectDirect$17(S3AFileSystem.java:2792) + at org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfSupplier(IOStatisticsBinding.java:604) + at org.apache.hadoop.fs.s3a.S3AFileSystem.putObjectDirect(S3AFileSystem.java:2789) + at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$createEmptyObject$33(S3AFileSystem.java:4440) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117) + ... 49 more +``` + +The user trying to use the KMS Key ID should have the right permissions to access +(encrypt/decrypt) using the AWS KMS Key used via `fs.s3a.server-side-encryption.key`. +If not, then add permission(or IAM role) in "Key users" section by selecting the +AWS-KMS CMK Key on AWS console. + ### Message appears in logs "Not all bytes were read from the S3ObjectInputStream" diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java index 4e5c452d3ff6f..242919192f19f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java @@ -32,6 +32,7 @@ import org.apache.hadoop.io.IOUtils; import org.junit.AfterClass; +import org.junit.Assume; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -259,4 +260,12 @@ protected AuditSpan span(AuditSpanSource source) throws IOException { return source.createSpan(getMethodName(), null, null); } + + /** + * Method to assume that S3 client side encryption is disabled on a test. + */ + public void skipIfClientSideEncryption() { + Assume.assumeTrue("Skipping test if CSE is enabled", + !getFileSystem().isCSEEnabled()); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/EncryptionTestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/EncryptionTestUtils.java index f9cfc0442e828..4fc03e0b2e7bf 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/EncryptionTestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/EncryptionTestUtils.java @@ -51,7 +51,7 @@ private EncryptionTestUtils() { */ public static String convertKeyToMd5(FileSystem fs) { String base64Key = fs.getConf().getTrimmed( - SERVER_SIDE_ENCRYPTION_KEY + SERVER_SIDE_ENCRYPTION_KEY ); byte[] key = Base64.decodeBase64(base64Key); byte[] md5 = DigestUtils.md5(key); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java new file mode 100644 index 0000000000000..880f032d47650 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java @@ -0,0 +1,301 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.statistics.IOStatisticAssertions; +import org.apache.hadoop.fs.statistics.IOStatistics; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; +import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.contract.ContractTestUtils.rm; +import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents; +import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; +import static org.apache.hadoop.fs.s3a.Constants.MULTIPART_MIN_SIZE; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBool; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Tests to verify S3 Client-Side Encryption (CSE). + */ +public abstract class ITestS3AClientSideEncryption extends AbstractS3ATestBase { + + private static final List SIZES = + new ArrayList<>(Arrays.asList(0, 1, 255, 4095)); + + private static final int BIG_FILE_SIZE = 15 * 1024 * 1024; + private static final int SMALL_FILE_SIZE = 1024; + + /** + * Testing S3 CSE on different file sizes. + */ + @Test + public void testEncryption() throws Throwable { + describe("Test to verify client-side encryption for different file sizes."); + for (int size : SIZES) { + validateEncryptionForFileSize(size); + } + } + + /** + * Testing the S3 client side encryption over rename operation. + */ + @Test + public void testEncryptionOverRename() throws Throwable { + describe("Test for AWS CSE on Rename Operation."); + maybeSkipTest(); + S3AFileSystem fs = getFileSystem(); + Path src = path(getMethodName()); + byte[] data = dataset(SMALL_FILE_SIZE, 'a', 'z'); + writeDataset(fs, src, data, data.length, SMALL_FILE_SIZE, + true, false); + + ContractTestUtils.verifyFileContents(fs, src, data); + Path dest = path(src.getName() + "-copy"); + fs.rename(src, dest); + ContractTestUtils.verifyFileContents(fs, dest, data); + assertEncrypted(dest); + } + + /** + * Test to verify if we get same content length of files in S3 CSE using + * listStatus and listFiles on the parent directory. 
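+   *
+   * (What is asserted: although CSE pads each object stored in S3 by 16
+   * bytes, both listStatus and listFiles are expected to report the
+   * original, unpadded lengths in SIZES.)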
+ */ + @Test + public void testDirectoryListingFileLengths() throws IOException { + describe("Test to verify directory listing calls gives correct content " + + "lengths"); + maybeSkipTest(); + S3AFileSystem fs = getFileSystem(); + Path parentDir = path(getMethodName()); + + // Creating files in the parent directory that will be used to assert + // content length. + for (int i : SIZES) { + Path child = new Path(parentDir, getMethodName() + i); + writeThenReadFile(child, i); + } + + // Getting the content lengths of files inside the directory via FileStatus. + List fileLengthDirListing = new ArrayList<>(); + for (FileStatus fileStatus : fs.listStatus(parentDir)) { + fileLengthDirListing.add((int) fileStatus.getLen()); + } + // Assert the file length we got against expected file length for + // ListStatus. + Assertions.assertThat(fileLengthDirListing) + .describedAs("File lengths aren't the same " + + "as expected from FileStatus dir. listing") + .containsExactlyInAnyOrderElementsOf(SIZES); + + // Getting the content lengths of files inside the directory via ListFiles. + RemoteIterator listDir = fs.listFiles(parentDir, true); + List fileLengthListLocated = new ArrayList<>(); + while (listDir.hasNext()) { + LocatedFileStatus fileStatus = listDir.next(); + fileLengthListLocated.add((int) fileStatus.getLen()); + } + // Assert the file length we got against expected file length for + // LocatedFileStatus. + Assertions.assertThat(fileLengthListLocated) + .describedAs("File lengths isn't same " + + "as expected from LocatedFileStatus dir. listing") + .containsExactlyInAnyOrderElementsOf(SIZES); + + } + + /** + * Test to verify multipart upload through S3ABlockOutputStream and + * verifying the contents of the uploaded file. + */ + @Test + public void testBigFilePutAndGet() throws IOException { + maybeSkipTest(); + assume("Scale test disabled: to enable set property " + + KEY_SCALE_TESTS_ENABLED, getTestPropertyBool( + getConfiguration(), + KEY_SCALE_TESTS_ENABLED, + DEFAULT_SCALE_TESTS_ENABLED)); + S3AFileSystem fs = getFileSystem(); + Path filePath = path(getMethodName()); + byte[] fileContent = dataset(BIG_FILE_SIZE, 'a', 26); + int offsetSeek = fileContent[BIG_FILE_SIZE - 4]; + + // PUT a 15MB file using CSE to force multipart in CSE. + createFile(fs, filePath, true, fileContent); + LOG.info("Multi-part upload successful..."); + + try (FSDataInputStream in = fs.open(filePath)) { + // Verify random IO. + in.seek(BIG_FILE_SIZE - 4); + assertEquals("Byte at a specific position not equal to actual byte", + offsetSeek, in.read()); + in.seek(0); + assertEquals("Byte at a specific position not equal to actual byte", + 'a', in.read()); + + // Verify seek-read between two multipart blocks. + in.seek(MULTIPART_MIN_SIZE - 1); + int byteBeforeBlockEnd = fileContent[MULTIPART_MIN_SIZE]; + assertEquals("Byte before multipart block end mismatch", + byteBeforeBlockEnd - 1, in.read()); + assertEquals("Byte at multipart end mismatch", + byteBeforeBlockEnd, in.read()); + assertEquals("Byte after multipart end mismatch", + byteBeforeBlockEnd + 1, in.read()); + + // Verify end of file seek read. + in.seek(BIG_FILE_SIZE + 1); + assertEquals("Byte at eof mismatch", + -1, in.read()); + + // Verify full read. + in.readFully(0, fileContent); + verifyFileContents(fs, filePath, fileContent); + } + } + + /** + * Testing how unencrypted and encrypted data behaves when read through + * CSE enabled and disabled FS respectively. 
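+   *
+   * (Expected behaviour, as asserted below: the CSE-enabled FS fails with a
+   * SecurityException when reading data that was written unencrypted, while
+   * the CSE-disabled FS can open the encrypted object but sees ciphertext
+   * and a padded content length rather than the original data.)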
+ */ + @Test + public void testEncryptionEnabledAndDisabledFS() throws Exception { + maybeSkipTest(); + S3AFileSystem cseDisabledFS = new S3AFileSystem(); + Configuration cseDisabledConf = getConfiguration(); + S3AFileSystem cseEnabledFS = getFileSystem(); + Path unEncryptedFilePath = path(getMethodName()); + Path encryptedFilePath = path(getMethodName() + "cse"); + + // Initialize a CSE disabled FS. + cseDisabledConf.unset(SERVER_SIDE_ENCRYPTION_ALGORITHM); + cseDisabledConf.unset(SERVER_SIDE_ENCRYPTION_KEY); + cseDisabledFS.initialize(getFileSystem().getUri(), + cseDisabledConf); + + // Verifying both FS instances using an IOStat gauge. + IOStatistics cseDisabledIOStats = cseDisabledFS.getIOStatistics(); + IOStatistics cseEnabledIOStatistics = cseEnabledFS.getIOStatistics(); + IOStatisticAssertions.assertThatStatisticGauge(cseDisabledIOStats, + Statistic.CLIENT_SIDE_ENCRYPTION_ENABLED.getSymbol()).isEqualTo(0L); + IOStatisticAssertions.assertThatStatisticGauge(cseEnabledIOStatistics, + Statistic.CLIENT_SIDE_ENCRYPTION_ENABLED.getSymbol()).isEqualTo(1L); + + // Unencrypted data written to a path. + try (FSDataOutputStream out = cseDisabledFS.create(unEncryptedFilePath)) { + out.write(new byte[SMALL_FILE_SIZE]); + } + + // CSE enabled FS trying to read unencrypted data would face an exception. + try (FSDataInputStream in = cseEnabledFS.open(unEncryptedFilePath)) { + FileStatus encryptedFSFileStatus = + cseEnabledFS.getFileStatus(unEncryptedFilePath); + assertEquals("Mismatch in content length bytes", SMALL_FILE_SIZE, + encryptedFSFileStatus.getLen()); + + intercept(SecurityException.class, "", + "SecurityException should be thrown", + () -> { + in.read(new byte[SMALL_FILE_SIZE]); + return "Exception should be raised if unencrypted data is read by " + + "a CSE enabled FS"; + }); + } + + // Encrypted data written to a path. + try (FSDataOutputStream out = cseEnabledFS.create(encryptedFilePath)) { + out.write('a'); + } + + // CSE disabled FS tries to read encrypted data. + try (FSDataInputStream in = cseDisabledFS.open(encryptedFilePath)) { + FileStatus unEncryptedFSFileStatus = + cseDisabledFS.getFileStatus(encryptedFilePath); + // Due to padding and encryption, content written and length shouldn't be + // equal to what a CSE disabled FS would read. + assertNotEquals("Mismatch in content length", 1, + unEncryptedFSFileStatus.getLen()); + Assertions.assertThat(in.read()) + .describedAs("Encrypted data shouldn't be equal to actual content " + + "without deciphering") + .isNotEqualTo('a'); + } + } + + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + S3ATestUtils.removeBaseAndBucketOverrides(conf, Constants.MULTIPART_SIZE, + Constants.MIN_MULTIPART_THRESHOLD); + // To force multi part put and get in small files, we'll set the + // threshold and part size to 5MB. + conf.set(Constants.MULTIPART_SIZE, + String.valueOf(MULTIPART_MIN_SIZE)); + conf.set(Constants.MIN_MULTIPART_THRESHOLD, + String.valueOf(MULTIPART_MIN_SIZE)); + return conf; + } + + /** + * Method to validate CSE for different file sizes. + * + * @param len length of the file. + */ + protected void validateEncryptionForFileSize(int len) throws IOException { + maybeSkipTest(); + describe("Create an encrypted file of size " + len); + // Creating a unique path by adding file length in file name. 
+ Path path = writeThenReadFile(getMethodName() + len, len); + assertEncrypted(path); + rm(getFileSystem(), path, false, false); + } + + /** + * Skip tests if certain conditions are met. + */ + protected abstract void maybeSkipTest(); + + /** + * Assert that at path references an encrypted blob. + * + * @param path path + * @throws IOException on a failure + */ + protected abstract void assertEncrypted(Path path) throws IOException; + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java new file mode 100644 index 0000000000000..35f648fd34ba1 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import java.io.IOException; +import java.util.Map; + +import com.amazonaws.services.s3.Headers; +import org.assertj.core.api.Assertions; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.impl.HeaderProcessing; + +import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfEncryptionTestsDisabled; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfKmsKeyIdIsNotSet; + +/** + * Testing the S3 CSE - KMS method. + */ +public class ITestS3AClientSideEncryptionKms + extends ITestS3AClientSideEncryption { + + private static final String KMS_KEY_WRAP_ALGO = "kms+context"; + private static final String KMS_CONTENT_ENCRYPTION_ALGO = "AES/GCM/NoPadding"; + + /** + * Creating custom configs for KMS testing. + * + * @return Configuration. + */ + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + S3ATestUtils.disableFilesystemCaching(conf); + return conf; + } + + @Override + protected void maybeSkipTest() { + skipIfEncryptionTestsDisabled(getConfiguration()); + skipIfKmsKeyIdIsNotSet(getConfiguration()); + } + + @Override + protected void assertEncrypted(Path path) throws IOException { + Map fsXAttrs = getFileSystem().getXAttrs(path); + String xAttrPrefix = "header."; + + // Assert KeyWrap Algo + assertEquals("Key wrap algo isn't same as expected", KMS_KEY_WRAP_ALGO, + processHeader(fsXAttrs, + xAttrPrefix + Headers.CRYPTO_KEYWRAP_ALGORITHM)); + + // Assert content encryption algo for KMS, is present in the + // materials description and KMS key ID isn't. 
+ String keyId = + getConfiguration().get(Constants.SERVER_SIDE_ENCRYPTION_KEY); + Assertions.assertThat(processHeader(fsXAttrs, + xAttrPrefix + Headers.MATERIALS_DESCRIPTION)) + .describedAs("Materials Description should contain the content " + + "encryption algo and should not contain the KMS keyID.") + .contains(KMS_CONTENT_ENCRYPTION_ALGO) + .doesNotContain(keyId); + } + + /** + * A method to process a FS xAttribute Header key by decoding it. + * + * @param fsXAttrs Map of String(Key) and bytes[](Value) to represent fs + * xAttr. + * @param headerKey Key value of the header we are trying to process. + * @return String representing the value of key header provided. + */ + private String processHeader(Map fsXAttrs, + String headerKey) { + return HeaderProcessing.decodeBytes(fsXAttrs.get(headerKey)); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java index 67c403e0b6d64..3a8cf7a11d666 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; import static org.apache.hadoop.fs.s3a.S3AEncryptionMethods.SSE_KMS; @@ -38,9 +39,11 @@ protected Configuration createConfiguration() { // get the KMS key for this test. 
Configuration c = new Configuration(); String kmsKey = c.get(SERVER_SIDE_ENCRYPTION_KEY); - if (StringUtils.isBlank(kmsKey)){ - skip(SERVER_SIDE_ENCRYPTION_KEY+ " is not set for " + - SSE_KMS.getMethod()); + if (StringUtils.isBlank(kmsKey) || !c.get(SERVER_SIDE_ENCRYPTION_ALGORITHM) + .equals(S3AEncryptionMethods.CSE_KMS.name())) { + skip(SERVER_SIDE_ENCRYPTION_KEY + " is not set for " + + SSE_KMS.getMethod() + " or CSE-KMS algorithm is used instead of " + + "SSE-KMS"); } Configuration conf = super.createConfiguration(); conf.set(SERVER_SIDE_ENCRYPTION_KEY, kmsKey); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java index c5ef65ff927ac..c7a62a39c93ee 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java @@ -57,9 +57,11 @@ public void setup() throws Exception { S3AFileSystem fs = getFileSystem(); Configuration c = fs.getConf(); String kmsKey = c.get(SERVER_SIDE_ENCRYPTION_KEY); - if (StringUtils.isBlank(kmsKey)) { + if (StringUtils.isBlank(kmsKey) || !c.get(SERVER_SIDE_ENCRYPTION_ALGORITHM) + .equals(S3AEncryptionMethods.CSE_KMS.name())) { skip(SERVER_SIDE_ENCRYPTION_KEY + " is not set for " + - SSE_KMS.getMethod()); + SSE_KMS.getMethod() + " or CSE-KMS algorithm is used instead of " + + "SSE-KMS"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInconsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInconsistency.java index b0e1a405158cc..a49998d50d5a6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInconsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInconsistency.java @@ -29,6 +29,7 @@ import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Assume; +import org.junit.Before; import org.junit.Test; import java.io.FileNotFoundException; @@ -63,6 +64,14 @@ public class ITestS3AInconsistency extends AbstractS3ATestBase { /** By using a power of 2 for the initial time, the total is a shift left. */ private static final int TOTAL_RETRY_DELAY = INITIAL_RETRY << RETRIES; + /** + * S3 Client side encryption when enabled should skip this test. 
+ */ + @Before + public void setUp() { + skipIfClientSideEncryption(); + } + @Override protected Configuration createConfiguration() { Configuration conf = super.createConfiguration(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java index 496226caf3da1..daefa783ae00c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java @@ -187,6 +187,7 @@ public void testEmptyFileChecksums() throws Throwable { */ private void assumeNoDefaultEncryption() throws IOException { try { + skipIfClientSideEncryption(); Assume.assumeThat(getDefaultEncryption(), nullValue()); } catch (AccessDeniedException e) { // if the user can't check the default encryption, assume that it is @@ -254,7 +255,7 @@ public void testNonEmptyFileChecksumsUnencrypted() throws Throwable { } private S3AEncryptionMethods encryptionAlgorithm() { - return getFileSystem().getServerSideEncryptionAlgorithm(); + return getFileSystem().getS3EncryptionAlgorithm(); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java index 17dc450707016..88a8df37c7aca 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java @@ -70,13 +70,17 @@ public void setup() throws Exception { invoker = new Invoker(new S3ARetryPolicy(getConfiguration()), Invoker.NO_OP ); + skipIfClientSideEncryption(); Assume.assumeTrue("No metadata store in test filesystem", getFileSystem().hasMetadataStore()); } @Override public void teardown() throws Exception { - clearInconsistency(getFileSystem()); + if (getFileSystem() + .getAmazonS3Client() instanceof InconsistentAmazonS3Client) { + clearInconsistency(getFileSystem()); + } super.teardown(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java index c5670b09c3db5..458841982720f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java @@ -229,4 +229,17 @@ public interface S3ATestConstants { * every test case. */ String DIRECTORY_MARKER_AUDIT = "fs.s3a.directory.marker.audit"; + + /** + * Constant bytes being written when Client side encryption KMS is enabled + * for a test. This bytes written takes into account "EncryptionContext", + * which contains the algo used for eg: + * "aws:x-amz-cek-alg":"AES/GCM/NoPadding" , and "KeySpec", which specifies + * the length of data key. for eg: AES_256 to generate a 256-bit symmetric + * key. + * + * For test using bytesWritten as an assertion this constant value can be + * used. 
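+   *
+   * A rough sketch of such an assertion (variable names are illustrative;
+   * the same arithmetic is used by ITestS3AFileContextStatistics later in
+   * this patch):
+   * 
+   *   long expected = blockSize + CSE_PADDING_LENGTH
+   *       + keyId.getBytes().length
+   *       + KMS_KEY_GENERATION_REQUEST_PARAMS_BYTES_WRITTEN;
+   *   assertEquals(expected, stats.getBytesWritten());
+   * 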
+ */ + int KMS_KEY_GENERATION_REQUEST_PARAMS_BYTES_WRITTEN = 94; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index 6a920972bab78..323f0ff5d2089 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -1495,4 +1495,16 @@ public static S3AFileStatus innerGetFileStatus( probes); } + /** + * Skip a test if CSE KMS key id is not set. + * + * @param configuration configuration to probe. + */ + public static void skipIfKmsKeyIdIsNotSet(Configuration configuration) { + if (configuration.get( + SERVER_SIDE_ENCRYPTION_KEY) == null) { + skip("AWS KMS key id is not set"); + } + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java index 05a07ce444c92..b1c46e0eed8ed 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AInputStreamRetry.java @@ -95,7 +95,7 @@ private S3AInputStream getMockedS3AInputStream() { fs.getBucket(), path, fs.pathToKey(path), - fs.getServerSideEncryptionAlgorithm(), + fs.getS3EncryptionAlgorithm(), new EncryptionSecrets().getEncryptionKey(), eTag, versionId, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSSEConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSSEConfiguration.java index a664a8bd3f204..273cf8b7c234e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSSEConfiguration.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSSEConfiguration.java @@ -86,14 +86,14 @@ public void testAESKeySet() throws Throwable { public void testSSEEmptyKey() { // test the internal logic of the test setup code Configuration c = buildConf(SSE_C.getMethod(), ""); - assertEquals("", getServerSideEncryptionKey(BUCKET, c)); + assertEquals("", getS3EncryptionKey(BUCKET, c)); } @Test public void testSSEKeyNull() throws Throwable { // test the internal logic of the test setup code final Configuration c = buildConf(SSE_C.getMethod(), null); - assertEquals("", getServerSideEncryptionKey(BUCKET, c)); + assertEquals("", getS3EncryptionKey(BUCKET, c)); intercept(IOException.class, SSE_C_NO_KEY_ERROR, () -> getEncryptionAlgorithm(BUCKET, c)); @@ -109,7 +109,7 @@ public void testSSEKeyFromCredentialProvider() throws Exception { // provider provisioned value instead. 
conf.set(SERVER_SIDE_ENCRYPTION_KEY, "keyInConfObject"); - String sseKey = getServerSideEncryptionKey(BUCKET, conf); + String sseKey = getS3EncryptionKey(BUCKET, conf); assertNotNull("Proxy password should not retrun null.", sseKey); assertEquals("Proxy password override did NOT work.", key, sseKey); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java index 72af1752b1253..22ce1e91c29f6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java @@ -26,6 +26,7 @@ import java.util.concurrent.atomic.AtomicInteger; import com.amazonaws.SignableRequest; +import com.amazonaws.auth.AWS4Signer; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.Signer; import com.amazonaws.services.s3.internal.AWSS3V4Signer; @@ -169,6 +170,20 @@ public CustomSigner() { LOG.info("Creating Signer #{}", c); } + /** + * Method to sign the incoming request with credentials. + * + * NOTE: In case of Client-side encryption, we do a "Generate Key" POST + * request to AWSKMS service rather than S3, this was causing the test to + * break. When this request happens, we have the endpoint in form of + * "kms.[REGION].amazonaws.com", and bucket-name becomes "kms". We can't + * use AWSS3V4Signer for AWSKMS service as it contains a header + * "x-amz-content-sha256:UNSIGNED-PAYLOAD", which returns a 400 bad + * request because the signature calculated by the service doesn't match + * what we sent. + * @param request the request to sign. + * @param credentials credentials used to sign the request. 
+ */ @Override public void sign(SignableRequest request, AWSCredentials credentials) { int c = INVOCATION_COUNT.incrementAndGet(); @@ -182,10 +197,21 @@ public void sign(SignableRequest request, AWSCredentials credentials) { } catch (IOException e) { throw new RuntimeException("Failed to get current Ugi", e); } - AWSS3V4Signer realSigner = new AWSS3V4Signer(); - realSigner.setServiceName("s3"); - realSigner.setRegionName(lastStoreValue.conf.get(TEST_REGION_KEY)); - realSigner.sign(request, credentials); + if (bucketName.equals("kms")) { + AWS4Signer realKMSSigner = new AWS4Signer(); + realKMSSigner.setServiceName("kms"); + if (lastStoreValue != null) { + realKMSSigner.setRegionName(lastStoreValue.conf.get(TEST_REGION_KEY)); + } + realKMSSigner.sign(request, credentials); + } else { + AWSS3V4Signer realSigner = new AWSS3V4Signer(); + realSigner.setServiceName("s3"); + if (lastStoreValue != null) { + realSigner.setRegionName(lastStoreValue.conf.get(TEST_REGION_KEY)); + } + realSigner.sign(request, credentials); + } } public static int getInstantiationCount() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java index 26655de9d4417..7ee53e0f8021c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java @@ -141,6 +141,10 @@ protected Configuration createConfiguration() { // disable if assume role opts are off assumeSessionTestsEnabled(conf); disableFilesystemCaching(conf); + String s3EncryptionMethod = + conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM, + S3AEncryptionMethods.SSE_KMS.getMethod()); + String s3EncryptionKey = conf.getTrimmed(SERVER_SIDE_ENCRYPTION_KEY, ""); removeBaseAndBucketOverrides(conf, DELEGATION_TOKEN_BINDING, SERVER_SIDE_ENCRYPTION_ALGORITHM, @@ -149,10 +153,11 @@ protected Configuration createConfiguration() { UserGroupInformation.AuthenticationMethod.KERBEROS.name()); enableDelegationTokens(conf, getDelegationBinding()); conf.set(AWS_CREDENTIALS_PROVIDER, " "); - // switch to SSE_S3. + // switch to CSE-KMS(if specified) else SSE-KMS. if (conf.getBoolean(KEY_ENCRYPTION_TESTS, true)) { - conf.set(SERVER_SIDE_ENCRYPTION_ALGORITHM, - S3AEncryptionMethods.SSE_S3.getMethod()); + conf.set(SERVER_SIDE_ENCRYPTION_ALGORITHM, s3EncryptionMethod); + // KMS key ID a must if CSE-KMS is being tested. + conf.set(SERVER_SIDE_ENCRYPTION_KEY, s3EncryptionKey); } // set the YARN RM up for YARN tests. 
conf.set(YarnConfiguration.RM_PRINCIPAL, YARN_RM); @@ -358,10 +363,10 @@ public void testDelegatedFileSystem() throws Throwable { assertBoundToDT(delegatedFS, tokenKind); if (encryptionTestEnabled()) { assertNotNull("Encryption propagation failed", - delegatedFS.getServerSideEncryptionAlgorithm()); + delegatedFS.getS3EncryptionAlgorithm()); assertEquals("Encryption propagation failed", - fs.getServerSideEncryptionAlgorithm(), - delegatedFS.getServerSideEncryptionAlgorithm()); + fs.getS3EncryptionAlgorithm(), + delegatedFS.getS3EncryptionAlgorithm()); } verifyRestrictedPermissions(delegatedFS); @@ -393,10 +398,10 @@ public void testDelegatedFileSystem() throws Throwable { assertBoundToDT(secondDelegate, tokenKind); if (encryptionTestEnabled()) { assertNotNull("Encryption propagation failed", - secondDelegate.getServerSideEncryptionAlgorithm()); + secondDelegate.getS3EncryptionAlgorithm()); assertEquals("Encryption propagation failed", - fs.getServerSideEncryptionAlgorithm(), - secondDelegate.getServerSideEncryptionAlgorithm()); + fs.getS3EncryptionAlgorithm(), + secondDelegate.getS3EncryptionAlgorithm()); } ContractTestUtils.assertDeleted(secondDelegate, testPath, true); assertNotNull("unbounded DT", diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index faef79c1f6379..012b6b9662cdb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -25,7 +25,6 @@ import com.amazonaws.services.s3.AmazonS3; import org.assertj.core.api.Assertions; -import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -179,11 +178,9 @@ public void setup() throws Exception { if (useInconsistentClient()) { AmazonS3 client = getFileSystem() .getAmazonS3ClientForTesting("fault injection"); - Assert.assertTrue( - "AWS client is not inconsistent, even though the test requirees it " - + client, - client instanceof InconsistentAmazonS3Client); - inconsistentClient = (InconsistentAmazonS3Client) client; + if (client instanceof InconsistentAmazonS3Client) { + inconsistentClient = (InconsistentAmazonS3Client) client; + } } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java index 5dc8be06b21c4..20ffd0f20d310 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java @@ -15,6 +15,7 @@ import java.net.URI; +import com.amazonaws.services.s3.model.CryptoStorageMode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -23,6 +24,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.auth.STSClientFactory; @@ -30,6 +32,11 @@ import org.junit.Assert; import org.junit.Before; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; +import 
static org.apache.hadoop.fs.s3a.S3ATestConstants.KMS_KEY_GENERATION_REQUEST_PARAMS_BYTES_WRITTEN; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.CSE_PADDING_LENGTH; + /** * S3a implementation of FCStatisticsBaseTest. */ @@ -39,10 +46,11 @@ public class ITestS3AFileContextStatistics extends FCStatisticsBaseTest { LoggerFactory.getLogger(STSClientFactory.class); private Path testRootPath; + private Configuration conf; @Before public void setUp() throws Exception { - Configuration conf = new Configuration(); + conf = new Configuration(); fc = S3ATestUtils.createTestFileContext(conf); testRootPath = fileContextTestHelper.getTestRootPath(fc, "test"); fc.mkdir(testRootPath, @@ -62,10 +70,31 @@ protected void verifyReadBytes(FileSystem.Statistics stats) { Assert.assertEquals(2 * blockSize, stats.getBytesRead()); } + /** + * A method to verify the bytes written. + *
    + * NOTE: if Client side encryption is enabled, expected bytes written + * should increase by 16(padding of data) + bytes for the key ID set + 94(KMS + * key generation) in case of storage type{@link CryptoStorageMode} as + * ObjectMetadata(Default). If Crypto Storage mode is instruction file then + * add additional bytes as that file is stored separately and would account + * for bytes written. + * + * @param stats Filesystem statistics. + */ @Override protected void verifyWrittenBytes(FileSystem.Statistics stats) { //No extra bytes are written - Assert.assertEquals(blockSize, stats.getBytesWritten()); + long expectedBlockSize = blockSize; + if (conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM, "") + .equals(S3AEncryptionMethods.CSE_KMS.getMethod())) { + String keyId = conf.get(SERVER_SIDE_ENCRYPTION_KEY, ""); + // Adding padding length and KMS key generation bytes written. + expectedBlockSize += CSE_PADDING_LENGTH + keyId.getBytes().length + + KMS_KEY_GENERATION_REQUEST_PARAMS_BYTES_WRITTEN; + } + Assert.assertEquals("Mismatch in bytes written", expectedBlockSize, + stats.getBytesWritten()); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java index 9b63430139f10..be51b2fa09cb1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java @@ -25,9 +25,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.EncryptionTestUtils; +import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3AFileSystem; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; import static org.apache.hadoop.fs.s3a.S3AEncryptionMethods.SSE_KMS; @@ -43,9 +45,11 @@ public class ITestS3AHugeFilesEncryption extends AbstractSTestS3AHugeFiles { public void setup() throws Exception { Configuration c = new Configuration(); String kmsKey = c.get(SERVER_SIDE_ENCRYPTION_KEY); - if (StringUtils.isBlank(kmsKey)) { + if (StringUtils.isBlank(kmsKey) || !c.get(SERVER_SIDE_ENCRYPTION_ALGORITHM) + .equals(S3AEncryptionMethods.CSE_KMS.name())) { skip(SERVER_SIDE_ENCRYPTION_KEY + " is not set for " + - SSE_KMS.getMethod()); + SSE_KMS.getMethod() + " or CSE-KMS algorithm is used instead of " + + "SSE-KMS"); } super.setup(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java index a8635ea3cd792..a1c5c3f4b69fd 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java @@ -49,7 +49,7 @@ public void setup() throws Exception { protected Configuration createScaleConfiguration() { Configuration conf = super.createScaleConfiguration(); removeBaseAndBucketOverrides(conf, SERVER_SIDE_ENCRYPTION_KEY, - SERVER_SIDE_ENCRYPTION_ALGORITHM); + SERVER_SIDE_ENCRYPTION_ALGORITHM); 
S3ATestUtils.disableFilesystemCaching(conf); conf.set(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM, getSSEAlgorithm().getMethod()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java index 4d1096fa79053..27798e21ed246 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java @@ -242,6 +242,7 @@ protected void logTimePerIOP(String operation, @Test public void testTimeToOpenAndReadWholeFileBlocks() throws Throwable { + skipIfClientSideEncryption(); requireCSVTestData(); int blockSize = _1MB; describe("Open the test file %s and read it in blocks of size %d", @@ -348,6 +349,7 @@ public void testReadaheadOutOfRange() throws Throwable { @Test public void testReadWithNormalPolicy() throws Throwable { describe("Read big blocks with a big readahead"); + skipIfClientSideEncryption(); executeSeekReadSequence(BIG_BLOCK_SIZE, BIG_BLOCK_SIZE * 2, S3AInputPolicy.Normal); assertStreamOpenedExactlyOnce(); @@ -356,6 +358,7 @@ public void testReadWithNormalPolicy() throws Throwable { @Test public void testDecompressionSequential128K() throws Throwable { describe("Decompress with a 128K readahead"); + skipIfClientSideEncryption(); executeDecompression(128 * _1KB, S3AInputPolicy.Sequential); assertStreamOpenedExactlyOnce(); } @@ -458,6 +461,7 @@ protected void executeSeekReadSequence(long blockSize, @Test public void testRandomIORandomPolicy() throws Throwable { + skipIfClientSideEncryption(); executeRandomIO(S3AInputPolicy.Random, (long) RANDOM_IO_SEQUENCE.length); assertEquals("streams aborted in " + streamStatistics, 0, streamStatistics.getAborted()); @@ -465,6 +469,7 @@ public void testRandomIORandomPolicy() throws Throwable { @Test public void testRandomIONormalPolicy() throws Throwable { + skipIfClientSideEncryption(); long expectedOpenCount = RANDOM_IO_SEQUENCE.length; executeRandomIO(S3AInputPolicy.Normal, expectedOpenCount); assertEquals("streams aborted in " + streamStatistics, From 5a34b3d599e38148630efde1449e0b1761b302bc Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Tue, 27 Jul 2021 16:47:41 +0200 Subject: [PATCH 516/666] YARN-6221. Entities missing from ATS when summary log file info got returned to the ATS before the domain log. 
Contributed by Xiaomin Zhang --- .../timeline/EntityGroupFSTimelineStore.java | 3 ++- .../TestEntityGroupFSTimelineStore.java | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java index e9957c8a84983..c2992b4e83f1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java @@ -808,10 +808,11 @@ private void addSummaryLog(String attemptDirName, LogInfo log; if (isDomainLog) { log = new DomainLogInfo(attemptDirName, filename, owner); + summaryLogs.add(0, log); } else { log = new EntityLogInfo(attemptDirName, filename, owner); + summaryLogs.add(log); } - summaryLogs.add(log); } private synchronized void addDetailLog(String attemptDirName, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java index 984e157971f99..7302ae130db2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java @@ -226,6 +226,31 @@ store.new AppLogs(mainTestAppId, mainTestAppDirPath, } } + @Test + public void testAppLogsDomainLogLastlyScanned() throws Exception { + EntityGroupFSTimelineStore.AppLogs appLogs = + store.new AppLogs(mainTestAppId, mainTestAppDirPath, + AppState.COMPLETED); + Path attemptDirPath = new Path(new Path(testActiveDirPath, + mainTestAppId.toString()), + getAttemptDirName(mainTestAppId)); + //Delete the domain log from AppDirPath so first scan won't find it + fs.delete(new Path(attemptDirPath, TEST_DOMAIN_LOG_FILE_NAME), false); + appLogs.scanForLogs(); + List summaryLogs = appLogs.getSummaryLogs(); + assertEquals(1, summaryLogs.size()); + assertEquals(TEST_SUMMARY_LOG_FILE_NAME, summaryLogs.get(0).getFilename()); + + //Generate the domain log + FSDataOutputStream out = fs.create( + new Path(attemptDirPath, TEST_DOMAIN_LOG_FILE_NAME)); + out.close(); + + appLogs.scanForLogs(); + assertEquals(2, summaryLogs.size()); + assertEquals(TEST_DOMAIN_LOG_FILE_NAME, summaryLogs.get(0).getFilename()); + } + @Test public void testMoveToDone() throws Exception { EntityGroupFSTimelineStore.AppLogs appLogs = From f6085a8184aa8fab766a71b96903bae101d469c1 Mon Sep 17 00:00:00 2001 From: JiaguodongF <272436024@qq.com> Date: Wed, 28 Jul 2021 00:55:21 +0800 Subject: [PATCH 517/666] HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid. (#3185) * HDFS-16119. 
start balancer with parameters -hotBlockTimeInterval xxx is invalid * HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid * HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid * HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid * HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid Co-authored-by: jiaguodong5 --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +- .../hadoop/hdfs/server/balancer/Balancer.java | 10 ++-- .../server/balancer/BalancerParameters.java | 4 ++ .../hdfs/server/balancer/TestBalancer.java | 48 +++++++++++++++++++ 4 files changed, 59 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b3834ae96b05f..66e3c8b71edba 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -735,7 +735,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY = "dfs.balancer.getBlocks.hot-time-interval"; public static final long DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT = - 0; + 0L; public static final String DFS_BALANCER_KEYTAB_ENABLED_KEY = "dfs.balancer.keytab.enabled"; public static final boolean DFS_BALANCER_KEYTAB_ENABLED_DEFAULT = false; public static final String DFS_BALANCER_ADDRESS_KEY = "dfs.balancer.address"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 73a8e681915fc..67ecd5a4d24ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -324,10 +324,12 @@ static int getFailedTimesSinceLastSuccessfulBalance() { * Balancer prefer to get blocks which are belong to the cold files * created before this time period. */ - final long hotBlockTimeInterval = conf.getTimeDuration( - DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY, - DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); + final long hotBlockTimeInterval = + p.getHotBlockTimeInterval() != 0L ? 
p.getHotBlockTimeInterval() : + conf.getTimeDuration( + DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY, + DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); // DataNode configuration parameters for balancing final int maxConcurrentMovesPerNode = getInt(conf, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java index 3d9cbb1802083..2b53c15d1deee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java @@ -110,6 +110,10 @@ boolean getSortTopNodes() { return this.sortTopNodes; } + long getHotBlockTimeInterval() { + return this.hotBlockTimeInterval; + } + @Override public String toString() { return String.format("%s.%s [%s," + " threshold = %s," diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 9af8ade0ea50c..2070a332b6b91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -38,6 +38,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; +import java.lang.reflect.Field; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.junit.AfterClass; import static org.junit.Assert.assertEquals; @@ -67,6 +68,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.lang3.StringUtils; +import org.junit.Assert; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1219,6 +1221,52 @@ public void testBalancerCliParseBlockpools() { assertEquals(1, p.getBlockPools().size()); } + @Test + public void testBalancerCliParseHotBlockTimeInterval() { + String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"}; + BalancerParameters p = Balancer.Cli.parse(parameters); + assertEquals(1000, p.getHotBlockTimeInterval()); + } + + @Test + public void testBalancerDispatchHotBlockTimeInterval() { + String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"}; + BalancerParameters p = Balancer.Cli.parse(parameters); + Configuration conf = new HdfsConfiguration(); + initConf(conf); + try { + cluster = new MiniDFSCluster + .Builder(conf) + .numDataNodes(0) + .setNNRedundancyConsiderLoad(false) + .build(); + cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY, + DFSConfigKeys.DFS_REPLICATION_DEFAULT); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, + DFSConfigKeys.DFS_REPLICATION_DEFAULT); + cluster.waitClusterUp(); + cluster.waitActive(); + Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + List connectors = + NameNodeConnector.newNameNodeConnectors(namenodes, + Balancer.class.getSimpleName(), + Balancer.BALANCER_ID_PATH, conf, + BalancerParameters.DEFAULT.getMaxIdleIteration()); + Balancer run = new Balancer( + connectors.get(0), p, new HdfsConfiguration()); + Field field = run.getClass().getDeclaredField("dispatcher"); + 
field.setAccessible(true); + Object dispatcher = field.get(run); + Field field1 = + dispatcher.getClass().getDeclaredField("hotBlockTimeInterval"); + field1.setAccessible(true); + Object hotBlockTimeInterval = field1.get(dispatcher); + assertEquals(1000, (long)hotBlockTimeInterval); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + /** * Verify balancer exits 0 on success. */ From 4ba35613e801cff4341a1e0f4e7b2b513527f452 Mon Sep 17 00:00:00 2001 From: sumangala-patki <70206833+sumangala-patki@users.noreply.github.com> Date: Tue, 27 Jul 2021 23:19:22 +0530 Subject: [PATCH 518/666] HADOOP-17765. ABFS: Use Unique File Paths in Tests. (#3153) Contributed by Sumangala Patki --- .../azurebfs/AbstractAbfsIntegrationTest.java | 21 ++++++- .../hadoop/fs/azurebfs/ITestAbfsClient.java | 11 ++-- .../ITestAbfsListStatusRemoteIterator.java | 6 +- .../azurebfs/ITestAbfsReadWriteAndSeek.java | 12 ++-- .../fs/azurebfs/ITestAbfsStatistics.java | 18 +++--- .../azurebfs/ITestAbfsStreamStatistics.java | 4 +- .../ITestAzureBlobFileSystemAppend.java | 23 +++---- ...ITestAzureBlobFileSystemAuthorization.java | 6 +- .../ITestAzureBlobFileSystemBackCompat.java | 9 ++- .../ITestAzureBlobFileSystemCheckAccess.java | 3 +- .../ITestAzureBlobFileSystemCopy.java | 2 +- .../ITestAzureBlobFileSystemCreate.java | 26 ++++---- ...ITestAzureBlobFileSystemDelegationSAS.java | 34 ++++++----- .../ITestAzureBlobFileSystemDelete.java | 27 +++++---- .../azurebfs/ITestAzureBlobFileSystemE2E.java | 59 +++++++++--------- .../ITestAzureBlobFileSystemFileStatus.java | 28 +++++---- .../ITestAzureBlobFileSystemFlush.java | 15 +++-- .../ITestAzureBlobFileSystemListStatus.java | 31 +++++----- .../ITestAzureBlobFileSystemMkDir.java | 8 +-- .../ITestAzureBlobFileSystemOauth.java | 58 ++++++++++-------- .../ITestAzureBlobFileSystemPermission.java | 3 +- .../ITestAzureBlobFileSystemRandomRead.java | 19 +++--- .../ITestAzureBlobFileSystemRename.java | 45 ++++++++------ ...ITestAzureBlobFileSystemRenameUnicode.java | 2 +- .../azurebfs/ITestAzureBlobFilesystemAcl.java | 5 +- .../fs/azurebfs/ITestCustomerProvidedKey.java | 60 ++++++++++++------- .../azurebfs/ITestFileSystemProperties.java | 31 ++++++---- .../azurebfs/ITestWasbAbfsCompatibility.java | 20 ++++--- .../services/ITestAbfsOutputStream.java | 26 ++++---- .../services/TestAbfsInputStream.java | 8 +-- 30 files changed, 350 insertions(+), 270 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 2497f8f1b63c5..ae24cf4a107cd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -26,11 +26,11 @@ import java.util.concurrent.Callable; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; @@ -84,6 +84,7 @@ public abstract class AbstractAbfsIntegrationTest extends private AuthType authType; private boolean useConfiguredFileSystem = false; private boolean usingFilesystemForSASTests = false; + private static final int SHORTENED_GUID_LEN = 12; protected 
AbstractAbfsIntegrationTest() throws Exception { fileSystemName = TEST_CONTAINER_PREFIX + UUID.randomUUID().toString(); @@ -270,7 +271,8 @@ protected void createFilesystemForSASTests() throws Exception { // so first create temporary instance of the filesystem using SharedKey // then re-use the filesystem it creates with SAS auth instead of SharedKey. AzureBlobFileSystem tempFs = (AzureBlobFileSystem) FileSystem.newInstance(rawConfig); - Assert.assertTrue(tempFs.exists(new Path("/"))); + ContractTestUtils.assertPathExists(tempFs, "This path should exist", + new Path("/")); abfsConfig.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SAS.name()); usingFilesystemForSASTests = true; } @@ -440,7 +442,20 @@ public Path makeQualified(Path path) throws java.io.IOException { */ protected Path path(String filepath) throws IOException { return getFileSystem().makeQualified( - new Path(getTestPath(), filepath)); + new Path(getTestPath(), getUniquePath(filepath))); + } + + /** + * Generate a unique path using the given filepath. + * @param filepath path string + * @return unique path created from filepath and a GUID + */ + protected Path getUniquePath(String filepath) { + if (filepath.equals("/")) { + return new Path(filepath); + } + return new Path(filepath + StringUtils + .right(UUID.randomUUID().toString(), SHORTENED_GUID_LEN)); } /** diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java index 62720b30710ce..f90d410343532 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java @@ -93,7 +93,7 @@ public void testUnknownHost() throws Exception { public void testListPathWithValidListMaxResultsValues() throws IOException, ExecutionException, InterruptedException { final int fileCount = 10; - final String directory = "testWithValidListMaxResultsValues"; + final Path directory = getUniquePath("testWithValidListMaxResultsValues"); createDirectoryWithNFiles(directory, fileCount); final int[] testData = {fileCount + 100, fileCount + 1, fileCount, fileCount - 1, 1}; @@ -102,7 +102,7 @@ public void testListPathWithValidListMaxResultsValues() setListMaxResults(listMaxResults); int expectedListResultsSize = listMaxResults > fileCount ? 
fileCount : listMaxResults; - Assertions.assertThat(listPath(directory)).describedAs( + Assertions.assertThat(listPath(directory.toString())).describedAs( "AbfsClient.listPath result should contain %d items when " + "listMaxResults is %d and directory contains %d items", expectedListResultsSize, listMaxResults, fileCount) @@ -114,9 +114,10 @@ public void testListPathWithValidListMaxResultsValues() public void testListPathWithValueGreaterThanServerMaximum() throws IOException, ExecutionException, InterruptedException { setListMaxResults(LIST_MAX_RESULTS_SERVER + 100); - final String directory = "testWithValueGreaterThanServerMaximum"; + final Path directory = getUniquePath( + "testWithValueGreaterThanServerMaximum"); createDirectoryWithNFiles(directory, LIST_MAX_RESULTS_SERVER + 200); - Assertions.assertThat(listPath(directory)).describedAs( + Assertions.assertThat(listPath(directory.toString())).describedAs( "AbfsClient.listPath result will contain a maximum of %d items " + "even if listMaxResults >= %d or directory " + "contains more than %d items", LIST_MAX_RESULTS_SERVER, @@ -152,7 +153,7 @@ private void setListMaxResults(int listMaxResults) throws IOException { .setListMaxResults(listMaxResults); } - private void createDirectoryWithNFiles(String directory, int n) + private void createDirectoryWithNFiles(Path directory, int n) throws ExecutionException, InterruptedException { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java index e2973912e2323..9e81a0127b6cb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java @@ -237,8 +237,8 @@ public void testHasNextForEmptyDir() throws Exception { @Test public void testHasNextForFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - String testFileName = "testFile"; - Path testFile = new Path(testFileName); + Path testFile = path("testFile"); + String testFileName = testFile.toString(); getFileSystem().create(testFile); setPageSize(10); RemoteIterator fsItr = fs.listStatusIterator(testFile); @@ -304,7 +304,7 @@ public String listStatus(Path path, String startFrom, private Path createTestDirectory() throws IOException { String testDirectoryName = "testDirectory" + System.currentTimeMillis(); - Path testDirectory = new Path(testDirectoryName); + Path testDirectory = path(testDirectoryName); getFileSystem().mkdirs(testDirectory); return testDirectory; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index f4f0f231037e1..b0e82444afb34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -45,7 +45,7 @@ */ @RunWith(Parameterized.class) public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { - private static final Path TEST_PATH = new Path("/testfile"); + private static final String TEST_PATH = "/testfile"; 
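+  // (Kept as a String: tests convert it with path(), which appends a short
+  // GUID so that every test method works against its own unique object in
+  // the store.)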
@Parameterized.Parameters(name = "Size={0}") public static Iterable sizes() { @@ -75,13 +75,14 @@ private void testReadWriteAndSeek(int bufferSize) throws Exception { final byte[] b = new byte[2 * bufferSize]; new Random().nextBytes(b); - try (FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = path(TEST_PATH); + try (FSDataOutputStream stream = fs.create(testPath)) { stream.write(b); } final byte[] readBuffer = new byte[2 * bufferSize]; int result; - try (FSDataInputStream inputStream = fs.open(TEST_PATH)) { + try (FSDataInputStream inputStream = fs.open(testPath)) { ((AbfsInputStream) inputStream.getWrappedStream()).registerListener( new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.READ, true, 0, @@ -112,7 +113,8 @@ public void testReadAheadRequestID() throws java.io.IOException { final byte[] b = new byte[bufferSize * 10]; new Random().nextBytes(b); - try (FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = path(TEST_PATH); + try (FSDataOutputStream stream = fs.create(testPath)) { ((AbfsOutputStream) stream.getWrappedStream()).registerListener( new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.WRITE, false, 0, @@ -126,7 +128,7 @@ public void testReadAheadRequestID() throws java.io.IOException { fs.registerListener( new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.OPEN, false, 0)); - try (FSDataInputStream inputStream = fs.open(TEST_PATH)) { + try (FSDataInputStream inputStream = fs.open(testPath)) { ((AbfsInputStream) inputStream.getWrappedStream()).registerListener( new TracingHeaderValidator(abfsConfiguration.getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.READ, false, 0, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index e6b572de9717c..98162fee08e9f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -91,7 +91,7 @@ public void testCreateStatistics() throws IOException { fs.mkdirs(createDirectoryPath); fs.createNonRecursive(createFilePath, FsPermission - .getDefault(), false, 1024, (short) 1, 1024, null); + .getDefault(), false, 1024, (short) 1, 1024, null).close(); Map metricMap = fs.getInstrumentationMap(); /* @@ -117,7 +117,7 @@ public void testCreateStatistics() throws IOException { fs.mkdirs(path(getMethodName() + "Dir" + i)); fs.createNonRecursive(path(getMethodName() + i), FsPermission.getDefault(), false, 1024, (short) 1, - 1024, null); + 1024, null).close(); } metricMap = fs.getInstrumentationMap(); @@ -160,7 +160,7 @@ public void testDeleteStatistics() throws IOException { files_deleted counters. */ fs.mkdirs(createDirectoryPath); - fs.create(path(createDirectoryPath + getMethodName())); + fs.create(path(createDirectoryPath + getMethodName())).close(); fs.delete(createDirectoryPath, true); Map metricMap = fs.getInstrumentationMap(); @@ -179,7 +179,7 @@ public void testDeleteStatistics() throws IOException { directories_deleted is called or not. 
*/ fs.mkdirs(createDirectoryPath); - fs.create(createFilePath); + fs.create(createFilePath).close(); fs.delete(createDirectoryPath, true); metricMap = fs.getInstrumentationMap(); @@ -199,9 +199,9 @@ public void testOpenAppendRenameExists() throws IOException { Path createFilePath = path(getMethodName()); Path destCreateFilePath = path(getMethodName() + "New"); - fs.create(createFilePath); - fs.open(createFilePath); - fs.append(createFilePath); + fs.create(createFilePath).close(); + fs.open(createFilePath).close(); + fs.append(createFilePath).close(); assertTrue(fs.rename(createFilePath, destCreateFilePath)); Map metricMap = fs.getInstrumentationMap(); @@ -225,11 +225,11 @@ public void testOpenAppendRenameExists() throws IOException { //re-initialising Abfs to reset statistic values. fs.initialize(fs.getUri(), fs.getConf()); - fs.create(destCreateFilePath); + fs.create(destCreateFilePath).close(); for (int i = 0; i < NUMBER_OF_OPS; i++) { fs.open(destCreateFilePath); - fs.append(destCreateFilePath); + fs.append(destCreateFilePath).close(); } metricMap = fs.getInstrumentationMap(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index 7eadb4bb8ff23..e5f182df2a1a2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -52,8 +52,8 @@ public void testAbfsStreamOps() throws Exception { + "Abfs"); final AzureBlobFileSystem fs = getFileSystem(); - Path smallOperationsFile = new Path("testOneReadWriteOps"); - Path largeOperationsFile = new Path("testLargeReadWriteOps"); + Path smallOperationsFile = path("testOneReadWriteOps"); + Path largeOperationsFile = path("testLargeReadWriteOps"); FileSystem.Statistics statistics = fs.getFsStatistics(); String testReadWriteOps = "test this"; statistics.reset(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index 4b1e497f64f84..dbe4b42a67df3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -35,8 +35,8 @@ */ public class ITestAzureBlobFileSystemAppend extends AbstractAbfsIntegrationTest { - private static final Path TEST_FILE_PATH = new Path("testfile"); - private static final Path TEST_FOLDER_PATH = new Path("testFolder"); + private static final String TEST_FILE_PATH = "testfile"; + private static final String TEST_FOLDER_PATH = "testFolder"; public ITestAzureBlobFileSystemAppend() throws Exception { super(); @@ -45,15 +45,15 @@ public ITestAzureBlobFileSystemAppend() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendDirShouldFail() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = TEST_FILE_PATH; + final Path filePath = path(TEST_FILE_PATH); fs.mkdirs(filePath); - fs.append(filePath, 0); + fs.append(filePath, 0).close(); } @Test public void testAppendWithLength0() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) { + 
try(FSDataOutputStream stream = fs.create(path(TEST_FILE_PATH))) { final byte[] b = new byte[1024]; new Random().nextBytes(b); stream.write(b, 1000, 0); @@ -65,28 +65,29 @@ public void testAppendWithLength0() throws Exception { @Test(expected = FileNotFoundException.class) public void testAppendFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = TEST_FILE_PATH; + final Path filePath = path(TEST_FILE_PATH); ContractTestUtils.touch(fs, filePath); fs.delete(filePath, false); - fs.append(filePath); + fs.append(filePath).close(); } @Test(expected = FileNotFoundException.class) public void testAppendDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = TEST_FOLDER_PATH; + final Path folderPath = path(TEST_FOLDER_PATH); fs.mkdirs(folderPath); - fs.append(folderPath); + fs.append(folderPath).close(); } @Test public void testTracingForAppend() throws IOException { AzureBlobFileSystem fs = getFileSystem(); - fs.create(TEST_FILE_PATH); + Path testPath = path(TEST_FILE_PATH); + fs.create(testPath).close(); fs.registerListener(new TracingHeaderValidator( fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.APPEND, false, 0)); - fs.append(TEST_FILE_PATH, 10); + fs.append(testPath, 10); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java index 589c3a285f48f..338cf8476afd8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java @@ -99,7 +99,7 @@ public void testSASTokenProviderEmptySASToken() throws Exception { this.getConfiguration().getRawConfiguration()); intercept(SASTokenProviderException.class, () -> { - testFs.create(new org.apache.hadoop.fs.Path("/testFile")); + testFs.create(new org.apache.hadoop.fs.Path("/testFile")).close(); }); } @@ -114,7 +114,7 @@ public void testSASTokenProviderNullSASToken() throws Exception { testFs.initialize(fs.getUri(), this.getConfiguration().getRawConfiguration()); intercept(SASTokenProviderException.class, ()-> { - testFs.create(new org.apache.hadoop.fs.Path("/testFile")); + testFs.create(new org.apache.hadoop.fs.Path("/testFile")).close(); }); } @@ -297,7 +297,7 @@ private void executeOp(Path reqPath, AzureBlobFileSystem fs, fs.listStatus(reqPath); break; case CreatePath: - fs.create(reqPath); + fs.create(reqPath).close(); break; case RenamePath: fs.rename(reqPath, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java index 1dd1058c94ba9..2941b96fefa2e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -50,13 +50,16 @@ public void testBlobBackCompat() throws Exception { CloudBlobContainer container = blobClient.getContainerReference(this.getFileSystemName()); container.createIfNotExists(); - CloudBlockBlob blockBlob = container.getBlockBlobReference("test/10/10/10"); + 
Path testPath = getUniquePath("test"); + CloudBlockBlob blockBlob = container + .getBlockBlobReference(testPath + "/10/10/10"); blockBlob.uploadText(""); - blockBlob = container.getBlockBlobReference("test/10/123/3/2/1/3"); + blockBlob = container.getBlockBlobReference(testPath + "/10/123/3/2/1/3"); blockBlob.uploadText(""); - FileStatus[] fileStatuses = fs.listStatus(new Path("/test/10/")); + FileStatus[] fileStatuses = fs + .listStatus(new Path(String.format("/%s/10/", testPath))); assertEquals(2, fileStatuses.length); assertEquals("10", fileStatuses[0].getPath().getName()); assertTrue(fileStatuses[0].isDirectory()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index f5dc5fdd212cb..fa05b77c61d71 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -352,7 +352,8 @@ private void modifyAcl(Path file, String uid, FsAction fsAction) private Path setupTestDirectoryAndUserAccess(String testFileName, FsAction fsAction) throws Exception { - Path file = new Path(TEST_FOLDER_PATH + testFileName); + Path testPath = path(TEST_FOLDER_PATH); + Path file = new Path(testPath + testFileName); file = this.superUserFs.makeQualified(file); this.superUserFs.delete(file, true); this.superUserFs.create(file); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java index 917ee9ce1b07e..aabaf82b622a8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java @@ -53,7 +53,7 @@ public void testCopyFromLocalFileSystem() throws Exception { localFs.delete(localFilePath, true); try { writeString(localFs, localFilePath, "Testing"); - Path dstPath = new Path("copiedFromLocal"); + Path dstPath = path("copiedFromLocal"); assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false, fs.getConf())); assertIsFile(fs, dstPath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index 86689a1e61940..2f23ac5c5c708 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -69,7 +69,7 @@ public class ITestAzureBlobFileSystemCreate extends AbstractAbfsIntegrationTest { private static final Path TEST_FILE_PATH = new Path("testfile"); - private static final Path TEST_FOLDER_PATH = new Path("testFolder"); + private static final String TEST_FOLDER_PATH = "testFolder"; private static final String TEST_CHILD_FILE = "childFile"; public ITestAzureBlobFileSystemCreate() throws Exception { @@ -92,7 +92,8 @@ public void testEnsureFileCreatedImmediately() throws Exception { @SuppressWarnings("deprecation") public void testCreateNonRecursive() throws Exception { final AzureBlobFileSystem 
fs = getFileSystem(); - Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = path(TEST_FOLDER_PATH); + Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null); fail("Should've thrown"); @@ -101,7 +102,7 @@ public void testCreateNonRecursive() throws Exception { fs.registerListener(new TracingHeaderValidator( fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.MKDIR, false, 0)); - fs.mkdirs(TEST_FOLDER_PATH); + fs.mkdirs(testFolderPath); fs.registerListener(null); fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null) @@ -113,13 +114,14 @@ public void testCreateNonRecursive() throws Exception { @SuppressWarnings("deprecation") public void testCreateNonRecursive1() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = path(TEST_FOLDER_PATH); + Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 1024, (short) 1, 1024, null); fail("Should've thrown"); } catch (FileNotFoundException expected) { } - fs.mkdirs(TEST_FOLDER_PATH); + fs.mkdirs(testFolderPath); fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null) .close(); assertIsFile(fs, testFile); @@ -131,13 +133,14 @@ public void testCreateNonRecursive1() throws Exception { public void testCreateNonRecursive2() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = path(TEST_FOLDER_PATH); + Path testFile = new Path(testFolderPath, TEST_CHILD_FILE); try { fs.createNonRecursive(testFile, FsPermission.getDefault(), false, 1024, (short) 1, 1024, null); fail("Should've thrown"); } catch (FileNotFoundException e) { } - fs.mkdirs(TEST_FOLDER_PATH); + fs.mkdirs(testFolderPath); fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null) .close(); assertIsFile(fs, testFile); @@ -149,7 +152,8 @@ public void testCreateNonRecursive2() throws Exception { @Test public void testWriteAfterClose() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = path(TEST_FOLDER_PATH); + Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); FSDataOutputStream out = fs.create(testPath); out.close(); intercept(IOException.class, () -> out.write('a')); @@ -169,7 +173,8 @@ public void testWriteAfterClose() throws Throwable { @Test public void testTryWithResources() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = path(TEST_FOLDER_PATH); + Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); try (FSDataOutputStream out = fs.create(testPath)) { out.write('1'); out.hsync(); @@ -202,7 +207,8 @@ public void testTryWithResources() throws Throwable { @Test public void testFilterFSWriteAfterClose() throws Throwable { final AzureBlobFileSystem fs = getFileSystem(); - Path testPath = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE); + Path testFolderPath = path(TEST_FOLDER_PATH); + Path testPath = new Path(testFolderPath, TEST_CHILD_FILE); FSDataOutputStream out = fs.create(testPath); intercept(FileNotFoundException.class, () -> { diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java index e86a4ec7bd1b2..5cba89ac4a5a6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -53,6 +53,8 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH; import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; @@ -223,15 +225,15 @@ public void testRename() throws Exception { stream.writeBytes("hello"); } - assertFalse(fs.exists(destinationPath)); + assertPathDoesNotExist(fs, "This path should not exist", destinationPath); fs.rename(sourcePath, destinationPath); - assertFalse(fs.exists(sourcePath)); - assertTrue(fs.exists(destinationPath)); + assertPathDoesNotExist(fs, "This path should not exist", sourcePath); + assertPathExists(fs, "This path should exist", destinationPath); - assertFalse(fs.exists(destinationDir)); + assertPathDoesNotExist(fs, "This path should not exist", destinationDir); fs.rename(sourceDir, destinationDir); - assertFalse(fs.exists(sourceDir)); - assertTrue(fs.exists(destinationDir)); + assertPathDoesNotExist(fs, "This path should not exist", sourceDir); + assertPathExists(fs, "This path should exist", destinationDir); } @Test @@ -246,13 +248,13 @@ public void testDelete() throws Exception { stream.writeBytes("hello"); } - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); fs.delete(filePath, false); - assertFalse(fs.exists(filePath)); + assertPathDoesNotExist(fs, "This path should not exist", filePath); - assertTrue(fs.exists(dirPath)); + assertPathExists(fs, "This path should exist", dirPath); fs.delete(dirPath, false); - assertFalse(fs.exists(dirPath)); + assertPathDoesNotExist(fs, "This path should not exist", dirPath); } @Test @@ -267,11 +269,11 @@ public void testDeleteRecursive() throws Exception { stream.writeBytes("hello"); } - assertTrue(fs.exists(dirPath)); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", dirPath); + assertPathExists(fs, "This path should exist", filePath); fs.delete(dirPath, true); - assertFalse(fs.exists(filePath)); - assertFalse(fs.exists(dirPath)); + assertPathDoesNotExist(fs, "This path should not exist", filePath); + assertPathDoesNotExist(fs, "This path should not exist", dirPath); } @Test @@ -395,8 +397,8 @@ public void testProperties() throws Exception { @Test public void testSignatureMask() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - String src = "/testABC/test.xt"; - fs.create(new Path(src)); + String src = String.format("/testABC/test%s.xt", UUID.randomUUID()); + fs.create(new Path(src)).close(); AbfsRestOperation abfsHttpRestOperation = fs.getAbfsClient() 
.renamePath(src, "/testABC" + "/abc.txt", null, getTestTracingContext(fs, false)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java index ad144dffc4e82..b9b846f52d2dd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java @@ -79,12 +79,13 @@ public ITestAzureBlobFileSystemDelete() throws Exception { public void testDeleteRoot() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - fs.mkdirs(new Path("/testFolder0")); - fs.mkdirs(new Path("/testFolder1")); - fs.mkdirs(new Path("/testFolder2")); - touch(new Path("/testFolder1/testfile")); - touch(new Path("/testFolder1/testfile2")); - touch(new Path("/testFolder1/testfile3")); + Path testPath = path("/testFolder"); + fs.mkdirs(new Path(testPath + "_0")); + fs.mkdirs(new Path(testPath + "_1")); + fs.mkdirs(new Path(testPath + "_2")); + touch(new Path(testPath + "_1/testfile")); + touch(new Path(testPath + "_1/testfile2")); + touch(new Path(testPath + "_1/testfile3")); Path root = new Path("/"); FileStatus[] ls = fs.listStatus(root); @@ -98,7 +99,7 @@ public void testDeleteRoot() throws Exception { @Test() public void testOpenFileAfterDelete() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testfile = new Path("/testFile"); + Path testfile = path("/testFile"); touch(testfile); assertDeleted(fs, testfile, false); @@ -109,7 +110,7 @@ public void testOpenFileAfterDelete() throws Exception { @Test public void testEnsureFileIsDeleted() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testfile = new Path("testfile"); + Path testfile = path("testfile"); touch(testfile); assertDeleted(fs, testfile, false); assertPathDoesNotExist(fs, "deleted", testfile); @@ -118,10 +119,10 @@ public void testEnsureFileIsDeleted() throws Exception { @Test public void testDeleteDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path dir = new Path("testfile"); + Path dir = path("testfile"); fs.mkdirs(dir); - fs.mkdirs(new Path("testfile/test1")); - fs.mkdirs(new Path("testfile/test1/test2")); + fs.mkdirs(new Path(dir + "/test1")); + fs.mkdirs(new Path(dir + "/test1/test2")); assertDeleted(fs, dir, true); assertPathDoesNotExist(fs, "deleted", dir); @@ -133,8 +134,9 @@ public void testDeleteFirstLevelDirectory() throws Exception { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); + Path dir = path("/test"); for (int i = 0; i < 1000; i++) { - final Path fileName = new Path("/test/" + i); + final Path fileName = new Path(dir + "/" + i); Callable callable = new Callable() { @Override public Void call() throws Exception { @@ -151,7 +153,6 @@ public Void call() throws Exception { } es.shutdownNow(); - Path dir = new Path("/test"); fs.registerListener(new TracingHeaderValidator( fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.DELETE, false, 0)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index 05c3855f5c89d..56016a39470e4 100644 --- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -33,6 +33,8 @@ import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_TOLERATE_CONCURRENT_APPEND; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** @@ -52,14 +54,14 @@ public ITestAzureBlobFileSystemE2E() throws Exception { @Test public void testWriteOneByteToFile() throws Exception { - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); } @Test public void testReadWriteBytesToFile() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); try(FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE)) { @@ -78,7 +80,7 @@ public void testOOBWritesAndReadFail() throws Exception { final byte[] b = new byte[2 * readBufferSize]; new Random().nextBytes(b); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); try(FSDataOutputStream writeStream = fs.create(testFilePath)) { writeStream.write(b); writeStream.flush(); @@ -107,7 +109,7 @@ public void testOOBWritesAndReadSucceed() throws Exception { byte[] bytesToRead = new byte[readBufferSize]; final byte[] b = new byte[2 * readBufferSize]; new Random().nextBytes(b); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); try (FSDataOutputStream writeStream = fs.create(testFilePath)) { writeStream.write(b); @@ -130,7 +132,7 @@ public void testOOBWritesAndReadSucceed() throws Exception { @Test public void testWriteWithBufferOffset() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); final byte[] b = new byte[1024 * 1000]; new Random().nextBytes(b); @@ -151,7 +153,7 @@ public void testWriteWithBufferOffset() throws Exception { @Test public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); final byte[] writeBuffer = new byte[5 * 1000 * 1024]; new Random().nextBytes(writeBuffer); @@ -171,50 +173,51 @@ public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception { @Test public void testReadWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); testWriteOneByteToFile(testFilePath); - FSDataInputStream inputStream = fs.open(testFilePath, TEST_DEFAULT_BUFFER_SIZE); - fs.delete(testFilePath, true); - assertFalse(fs.exists(testFilePath)); + try (FSDataInputStream inputStream = fs.open(testFilePath, + 
TEST_DEFAULT_BUFFER_SIZE)) { + fs.delete(testFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); - intercept(FileNotFoundException.class, - () -> inputStream.read(new byte[1])); + intercept(FileNotFoundException.class, () -> inputStream.read(new byte[1])); + } } @Test public void testWriteWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); - FSDataOutputStream stream = fs.create(testFilePath); - assertTrue(fs.exists(testFilePath)); - stream.write(TEST_BYTE); + try (FSDataOutputStream stream = fs.create(testFilePath)) { + assertPathExists(fs, "Path should exist", testFilePath); + stream.write(TEST_BYTE); - fs.delete(testFilePath, true); - assertFalse(fs.exists(testFilePath)); + fs.delete(testFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); - // trigger append call - intercept(FileNotFoundException.class, - () -> stream.close()); + // trigger append call + intercept(FileNotFoundException.class, () -> stream.close()); + } } @Test public void testFlushWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - final Path testFilePath = new Path(methodName.getMethodName()); + final Path testFilePath = path(methodName.getMethodName()); if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { return; } - FSDataOutputStream stream = fs.create(testFilePath); - assertTrue(fs.exists(testFilePath)); + try (FSDataOutputStream stream = fs.create(testFilePath)) { + assertPathExists(fs, "This path should exist", testFilePath); - fs.delete(testFilePath, true); - assertFalse(fs.exists(testFilePath)); + fs.delete(testFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", testFilePath); - intercept(FileNotFoundException.class, - () -> stream.close()); + intercept(FileNotFoundException.class, () -> stream.close()); + } } private void testWriteOneByteToFile(Path testFilePath) throws Exception { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index b9498be89a1ff..4fa7a0fca68ae 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -27,6 +27,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; + /** * Test FileStatus. 
*/ @@ -37,8 +39,8 @@ public class ITestAzureBlobFileSystemFileStatus extends private static final String DEFAULT_UMASK_VALUE = "027"; private static final String FULL_PERMISSION = "777"; - private static final Path TEST_FILE = new Path("testFile"); - private static final Path TEST_FOLDER = new Path("testDir"); + private static final String TEST_FILE = "testFile"; + private static final String TEST_FOLDER = "testDir"; public ITestAzureBlobFileSystemFileStatus() throws Exception { super(); @@ -57,8 +59,9 @@ public void testEnsureStatusWorksForRoot() throws Exception { public void testFileStatusPermissionsAndOwnerAndGroup() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); - touch(TEST_FILE); - validateStatus(fs, TEST_FILE, false); + Path testFile = path(TEST_FILE); + touch(testFile); + validateStatus(fs, testFile, false); } private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, final boolean isDir) @@ -93,9 +96,10 @@ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, public void testFolderStatusPermissionsAndOwnerAndGroup() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); - fs.mkdirs(TEST_FOLDER); + Path testFolder = path(TEST_FOLDER); + fs.mkdirs(testFolder); - validateStatus(fs, TEST_FOLDER, true); + validateStatus(fs, testFolder, true); } @Test @@ -108,11 +112,11 @@ public void testAbfsPathWithHost() throws IOException { Path pathwithouthost2 = new Path("/abfs/file2.txt"); // verify compatibility of this path format - fs.create(pathWithHost1); - assertTrue(fs.exists(pathwithouthost1)); + fs.create(pathWithHost1).close(); + assertPathExists(fs, "This path should exist", pathwithouthost1); - fs.create(pathwithouthost2); - assertTrue(fs.exists(pathWithHost2)); + fs.create(pathwithouthost2).close(); + assertPathExists(fs, "This path should exist", pathWithHost2); // verify get FileStatus fileStatus1 = fs.getFileStatus(pathWithHost1); @@ -125,13 +129,13 @@ public void testAbfsPathWithHost() throws IOException { @Test public void testLastModifiedTime() throws IOException { AzureBlobFileSystem fs = this.getFileSystem(); - Path testFilePath = new Path("childfile1.txt"); + Path testFilePath = path("childfile1.txt"); long createStartTime = System.currentTimeMillis(); long minCreateStartTime = (createStartTime / 1000) * 1000 - 1; // Dividing and multiplying by 1000 to make last 3 digits 0. // It is observed that modification time is returned with last 3 // digits 0 always. 
- fs.create(testFilePath); + fs.create(testFilePath).close(); long createEndTime = System.currentTimeMillis(); FileStatus fStat = fs.getFileStatus(testFilePath); long lastModifiedTime = fStat.getModificationTime(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index 40a551cd60a30..d27f9fa62194d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -316,14 +316,13 @@ public void testTracingHeaderForAppendBlob() throws Exception { byte[] buf = new byte[10]; new Random().nextBytes(buf); - FSDataOutputStream out = fs.create(new Path("/testFile")); - ((AbfsOutputStream) out.getWrappedStream()).registerListener( - new TracingHeaderValidator( - fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), - fs.getFileSystemId(), FSOperationType.WRITE, false, 0, - ((AbfsOutputStream) out.getWrappedStream()).getStreamID())); - out.write(buf); - out.hsync(); + try (FSDataOutputStream out = fs.create(new Path("/testFile"))) { + ((AbfsOutputStream) out.getWrappedStream()).registerListener(new TracingHeaderValidator( + fs.getAbfsStore().getAbfsConfiguration().getClientCorrelationId(), fs.getFileSystemId(), FSOperationType.WRITE, false, 0, + ((AbfsOutputStream) out.getWrappedStream()).getStreamID())); + out.write(buf); + out.hsync(); + } } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java index dc9ef9bffb945..8d1330b5ea7dd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java @@ -99,7 +99,7 @@ public Void call() throws Exception { @Test public void testListFileVsListDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path path = new Path("/testFile"); + Path path = path("/testFile"); try(FSDataOutputStream ignored = fs.create(path)) { FileStatus[] testFiles = fs.listStatus(path); assertEquals("length of test files", 1, testFiles.length); @@ -111,19 +111,20 @@ public void testListFileVsListDir() throws Exception { @Test public void testListFileVsListDir2() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - fs.mkdirs(new Path("/testFolder")); - fs.mkdirs(new Path("/testFolder/testFolder2")); - fs.mkdirs(new Path("/testFolder/testFolder2/testFolder3")); - Path testFile0Path = new Path("/testFolder/testFolder2/testFolder3/testFile"); + Path testFolder = path("/testFolder"); + fs.mkdirs(testFolder); + fs.mkdirs(new Path(testFolder + "/testFolder2")); + fs.mkdirs(new Path(testFolder + "/testFolder2/testFolder3")); + Path testFile0Path = new Path( + testFolder + "/testFolder2/testFolder3/testFile"); ContractTestUtils.touch(fs, testFile0Path); FileStatus[] testFiles = fs.listStatus(testFile0Path); assertEquals("Wrong listing size of file " + testFile0Path, 1, testFiles.length); FileStatus file0 = testFiles[0]; - assertEquals("Wrong path for " + file0, - new Path(getTestUrl(), "/testFolder/testFolder2/testFolder3/testFile"), - file0.getPath()); + 
assertEquals("Wrong path for " + file0, new Path(getTestUrl(), + testFolder + "/testFolder2/testFolder3/testFile"), file0.getPath()); assertIsFileReference(file0); } @@ -136,18 +137,18 @@ public void testListNonExistentDir() throws Exception { @Test public void testListFiles() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path testDir = new Path("/test"); + Path testDir = path("/test"); fs.mkdirs(testDir); FileStatus[] fileStatuses = fs.listStatus(new Path("/")); assertEquals(1, fileStatuses.length); - fs.mkdirs(new Path("/test/sub")); + fs.mkdirs(new Path(testDir + "/sub")); fileStatuses = fs.listStatus(testDir); assertEquals(1, fileStatuses.length); assertEquals("sub", fileStatuses[0].getPath().getName()); assertIsDirectoryReference(fileStatuses[0]); - Path childF = fs.makeQualified(new Path("/test/f")); + Path childF = fs.makeQualified(new Path(testDir + "/f")); touch(childF); fileStatuses = fs.listStatus(testDir); assertEquals(2, fileStatuses.length); @@ -193,7 +194,7 @@ public void testMkdirTrailingPeriodDirName() throws IOException { final AzureBlobFileSystem fs = getFileSystem(); Path nontrailingPeriodDir = path("testTrailingDir/dir"); - Path trailingPeriodDir = path("testTrailingDir/dir."); + Path trailingPeriodDir = new Path("testMkdirTrailingDir/dir."); assertMkdirs(fs, nontrailingPeriodDir); @@ -212,8 +213,8 @@ public void testCreateTrailingPeriodFileName() throws IOException { boolean exceptionThrown = false; final AzureBlobFileSystem fs = getFileSystem(); - Path trailingPeriodFile = path("testTrailingDir/file."); - Path nontrailingPeriodFile = path("testTrailingDir/file"); + Path trailingPeriodFile = new Path("testTrailingDir/file."); + Path nontrailingPeriodFile = path("testCreateTrailingDir/file"); createFile(fs, nontrailingPeriodFile, false, new byte[0]); assertPathExists(fs, "Trailing period file does not exist", @@ -235,7 +236,7 @@ public void testRenameTrailingPeriodFile() throws IOException { final AzureBlobFileSystem fs = getFileSystem(); Path nonTrailingPeriodFile = path("testTrailingDir/file"); - Path trailingPeriodFile = path("testTrailingDir/file."); + Path trailingPeriodFile = new Path("testRenameTrailingDir/file."); createFile(fs, nonTrailingPeriodFile, false, new byte[0]); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index a9a059e7cda56..bc6f35c66bc53 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -49,7 +49,7 @@ public void testCreateDirWithExistingDir() throws Exception { DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getIsNamespaceEnabled( getFileSystem())); final AzureBlobFileSystem fs = getFileSystem(); - Path path = new Path("testFolder"); + Path path = path("testFolder"); assertMkdirs(fs, path); assertMkdirs(fs, path); } @@ -64,7 +64,7 @@ public void testMkdirExistingDirOverwriteFalse() throws Exception { Configuration config = new Configuration(this.getRawConfiguration()); config.set(FS_AZURE_ENABLE_MKDIR_OVERWRITE, Boolean.toString(false)); AzureBlobFileSystem fs = getFileSystem(config); - Path path = new Path("testFolder"); + Path path = path("testFolder"); assertMkdirs(fs, path); //checks that mkdirs returns true long timeCreated = 
fs.getFileStatus(path).getModificationTime(); assertMkdirs(fs, path); //call to existing dir should return success @@ -78,8 +78,8 @@ public void createDirWithExistingFilename() throws Exception { DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getIsNamespaceEnabled( getFileSystem())); final AzureBlobFileSystem fs = getFileSystem(); - Path path = new Path("testFilePath"); - fs.create(path); + Path path = path("testFilePath"); + fs.create(path).close(); assertTrue(fs.getFileStatus(path).isFile()); intercept(FileAlreadyExistsException.class, () -> fs.mkdirs(path)); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index 62bac31904546..f27e75839b73f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -45,6 +45,8 @@ import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_ID; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; /** * Test Azure Oauth with Blob Data contributor role and Blob Data Reader role. @@ -54,8 +56,8 @@ public class ITestAzureBlobFileSystemOauth extends AbstractAbfsIntegrationTest{ private static final Path FILE_PATH = new Path("/testFile"); - private static final Path EXISTED_FILE_PATH = new Path("/existedFile"); - private static final Path EXISTED_FOLDER_PATH = new Path("/existedFolder"); + private static final String EXISTED_FILE_PATH = "/existedFile"; + private static final String EXISTED_FOLDER_PATH = "/existedFolder"; private static final Logger LOG = LoggerFactory.getLogger(ITestAbfsStreamStatistics.class); @@ -72,7 +74,9 @@ public void testBlobDataContributor() throws Exception { String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET); Assume.assumeTrue("Contributor client secret not provided", secret != null); - prepareFiles(); + Path existedFilePath = path(EXISTED_FILE_PATH); + Path existedFolderPath = path(EXISTED_FOLDER_PATH); + prepareFiles(existedFilePath, existedFolderPath); final AzureBlobFileSystem fs = getBlobConributor(); @@ -80,39 +84,39 @@ public void testBlobDataContributor() throws Exception { try(FSDataOutputStream stream = fs.create(FILE_PATH)) { stream.write(0); } - assertTrue(fs.exists(FILE_PATH)); + assertPathExists(fs, "This path should exist", FILE_PATH); FileStatus fileStatus = fs.getFileStatus(FILE_PATH); assertEquals(1, fileStatus.getLen()); // delete file assertTrue(fs.delete(FILE_PATH, true)); - assertFalse(fs.exists(FILE_PATH)); + assertPathDoesNotExist(fs, "This path should not exist", FILE_PATH); // Verify Blob Data Contributor has full access to existed folder, file // READ FOLDER - assertTrue(fs.exists(EXISTED_FOLDER_PATH)); + assertPathExists(fs, "This path should exist", existedFolderPath); //DELETE FOLDER - fs.delete(EXISTED_FOLDER_PATH, true); - assertFalse(fs.exists(EXISTED_FOLDER_PATH)); + fs.delete(existedFolderPath, true); + 
assertPathDoesNotExist(fs, "This path should not exist", existedFolderPath); // READ FILE - try (FSDataInputStream stream = fs.open(EXISTED_FILE_PATH)) { + try (FSDataInputStream stream = fs.open(existedFilePath)) { assertTrue(stream.read() != 0); } - assertEquals(0, fs.getFileStatus(EXISTED_FILE_PATH).getLen()); + assertEquals(0, fs.getFileStatus(existedFilePath).getLen()); // WRITE FILE - try (FSDataOutputStream stream = fs.append(EXISTED_FILE_PATH)) { + try (FSDataOutputStream stream = fs.append(existedFilePath)) { stream.write(0); } - assertEquals(1, fs.getFileStatus(EXISTED_FILE_PATH).getLen()); + assertEquals(1, fs.getFileStatus(existedFilePath).getLen()); // REMOVE FILE - fs.delete(EXISTED_FILE_PATH, true); - assertFalse(fs.exists(EXISTED_FILE_PATH)); + fs.delete(existedFilePath, true); + assertPathDoesNotExist(fs, "This path should not exist", existedFilePath); } /* @@ -125,7 +129,9 @@ public void testBlobDataReader() throws Exception { String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET); Assume.assumeTrue("Reader client secret not provided", secret != null); - prepareFiles(); + Path existedFilePath = path(EXISTED_FILE_PATH); + Path existedFolderPath = path(EXISTED_FOLDER_PATH); + prepareFiles(existedFilePath, existedFolderPath); final AzureBlobFileSystem fs = getBlobReader(); // Use abfsStore in this test to verify the ERROR code in AbfsRestOperationException @@ -134,25 +140,25 @@ public void testBlobDataReader() throws Exception { // TEST READ FS Map properties = abfsStore.getFilesystemProperties(tracingContext); // TEST READ FOLDER - assertTrue(fs.exists(EXISTED_FOLDER_PATH)); + assertPathExists(fs, "This path should exist", existedFolderPath); // TEST DELETE FOLDER try { - abfsStore.delete(EXISTED_FOLDER_PATH, true, tracingContext); + abfsStore.delete(existedFolderPath, true, tracingContext); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); } // TEST READ FILE - try (InputStream inputStream = abfsStore.openFileForRead(EXISTED_FILE_PATH, null, - tracingContext)) { + try (InputStream inputStream = abfsStore + .openFileForRead(existedFilePath, null, tracingContext)) { assertTrue(inputStream.read() != 0); } // TEST WRITE FILE try { - abfsStore.openFileForWrite(EXISTED_FILE_PATH, fs.getFsStatistics(), true, - tracingContext); + abfsStore.openFileForWrite(existedFilePath, fs.getFsStatistics(), true, + tracingContext); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); } finally { @@ -161,14 +167,14 @@ public void testBlobDataReader() throws Exception { } - private void prepareFiles() throws IOException { + private void prepareFiles(Path existedFilePath, Path existedFolderPath) throws IOException { // create test files/folders to verify access control diff between // Blob data contributor and Blob data reader final AzureBlobFileSystem fs = this.getFileSystem(); - fs.create(EXISTED_FILE_PATH); - assertTrue(fs.exists(EXISTED_FILE_PATH)); - fs.mkdirs(EXISTED_FOLDER_PATH); - assertTrue(fs.exists(EXISTED_FOLDER_PATH)); + fs.create(existedFilePath).close(); + assertPathExists(fs, "This path should exist", existedFilePath); + fs.mkdirs(existedFolderPath); + assertPathExists(fs, "This path should exist", existedFolderPath); } private AzureBlobFileSystem getBlobConributor() throws Exception { diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java index 138e2023786bc..0d644b6c743d0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java @@ -84,7 +84,8 @@ public void testFilePermission() throws Exception { new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); fs.removeDefaultAcl(path.getParent()); - fs.create(path, permission, true, KILOBYTE, (short) 1, KILOBYTE - 1, null); + fs.create(path, permission, true, KILOBYTE, (short) 1, KILOBYTE - 1, + null).close(); FileStatus status = fs.getFileStatus(path); Assert.assertEquals(permission.applyUMask(DEFAULT_UMASK_PERMISSION), status.getPermission()); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index 0a9713cffa94b..c1f0e06439950 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -86,7 +86,7 @@ public ITestAzureBlobFileSystemRandomRead() throws Exception { @Test public void testBasicRead() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testBasicRead"); + Path testPath = path(TEST_FILE_PREFIX + "_testBasicRead"); assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -115,7 +115,7 @@ public void testBasicRead() throws Exception { public void testRandomRead() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", getIsNamespaceEnabled(getFileSystem())); - Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomRead"); + Path testPath = path(TEST_FILE_PREFIX + "_testRandomRead"); assumeHugeFileExists(testPath); try ( @@ -174,7 +174,7 @@ public void testRandomRead() throws Exception { */ @Test public void testSeekToNewSource() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekToNewSource"); + Path testPath = path(TEST_FILE_PREFIX + "_testSeekToNewSource"); assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -189,7 +189,7 @@ public void testSeekToNewSource() throws Exception { */ @Test public void testSkipBounds() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipBounds"); + Path testPath = path(TEST_FILE_PREFIX + "_testSkipBounds"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -230,7 +230,7 @@ public Long call() throws Exception { */ @Test public void testValidateSeekBounds() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testValidateSeekBounds"); + Path testPath = path(TEST_FILE_PREFIX + "_testValidateSeekBounds"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -281,7 +281,7 @@ public FSDataInputStream call() throws Exception { */ @Test public void testSeekAndAvailableAndPosition() throws Exception 
{ - Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); + Path testPath = path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -347,7 +347,7 @@ public void testSeekAndAvailableAndPosition() throws Exception { */ @Test public void testSkipAndAvailableAndPosition() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); + Path testPath = path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); long testFileLength = assumeHugeFileExists(testPath); try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { @@ -413,7 +413,8 @@ public void testSkipAndAvailableAndPosition() throws Exception { @Test public void testSequentialReadAfterReverseSeekPerformance() throws Exception { - Path testPath = new Path(TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance"); + Path testPath = path( + TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance"); assumeHugeFileExists(testPath); final int maxAttempts = 10; final double maxAcceptableRatio = 1.01; @@ -446,7 +447,7 @@ public void testSequentialReadAfterReverseSeekPerformance() public void testRandomReadPerformance() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", getIsNamespaceEnabled(getFileSystem())); - Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomReadPerformance"); + Path testPath = path(TEST_FILE_PREFIX + "_testRandomReadPerformance"); assumeHugeFileExists(testPath); final AzureBlobFileSystem abFs = this.getFileSystem(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index ac1ec25d6d359..b12af5b0826ab 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -51,6 +51,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; @@ -95,13 +96,13 @@ public void testRenameWithPreExistingDestination() throws Exception { @Test public void testRenameFileUnderDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path sourceDir = new Path("/testSrc"); + Path sourceDir = path("/testSrc"); assertMkdirs(fs, sourceDir); String filename = "file1"; Path file1 = new Path(sourceDir, filename); touch(file1); - Path destDir = new Path("/testDst"); + Path destDir = path("/testDst"); assertRenameOutcome(fs, sourceDir, destDir, true); FileStatus[] fileStatus = fs.listStatus(destDir); assertNotNull("Null file status", fileStatus); @@ -113,14 +114,15 @@ public void testRenameFileUnderDir() throws Exception { @Test public void testRenameDirectory() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - fs.mkdirs(new 
Path("testDir")); - Path test1 = new Path("testDir/test1"); + Path testDir = path("testDir"); + fs.mkdirs(testDir); + Path test1 = new Path(testDir + "/test1"); fs.mkdirs(test1); - fs.mkdirs(new Path("testDir/test1/test2")); - fs.mkdirs(new Path("testDir/test1/test2/test3")); + fs.mkdirs(new Path(testDir + "/test1/test2")); + fs.mkdirs(new Path(testDir + "/test1/test2/test3")); assertRenameOutcome(fs, test1, - new Path("testDir/test10"), true); + new Path(testDir + "/test10"), true); assertPathDoesNotExist(fs, "rename source dir", test1); } @@ -130,8 +132,9 @@ public void testRenameFirstLevelDirectory() throws Exception { final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); + Path source = path("/test"); for (int i = 0; i < 1000; i++) { - final Path fileName = new Path("/test/" + i); + final Path fileName = new Path(source + "/" + i); Callable callable = new Callable() { @Override public Void call() throws Exception { @@ -148,8 +151,7 @@ public Void call() throws Exception { } es.shutdownNow(); - Path source = new Path("/test"); - Path dest = new Path("/renamedDir"); + Path dest = path("/renamedDir"); assertRenameOutcome(fs, source, dest, true); FileStatus[] files = fs.listStatus(dest); @@ -173,14 +175,19 @@ public void testRenameRoot() throws Exception { @Test public void testPosixRenameDirectory() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - fs.mkdirs(new Path("testDir2/test1/test2/test3")); - fs.mkdirs(new Path("testDir2/test4")); - Assert.assertTrue(fs.rename(new Path("testDir2/test1/test2/test3"), new Path("testDir2/test4"))); - assertTrue(fs.exists(new Path("testDir2"))); - assertTrue(fs.exists(new Path("testDir2/test1/test2"))); - assertTrue(fs.exists(new Path("testDir2/test4"))); - assertTrue(fs.exists(new Path("testDir2/test4/test3"))); - assertFalse(fs.exists(new Path("testDir2/test1/test2/test3"))); + Path testDir2 = path("testDir2"); + fs.mkdirs(new Path(testDir2 + "/test1/test2/test3")); + fs.mkdirs(new Path(testDir2 + "/test4")); + Assert.assertTrue(fs.rename(new Path(testDir2 + "/test1/test2/test3"), new Path(testDir2 + "/test4"))); + assertPathExists(fs, "This path should exist", testDir2); + assertPathExists(fs, "This path should exist", + new Path(testDir2 + "/test1/test2")); + assertPathExists(fs, "This path should exist", + new Path(testDir2 + "/test4")); + assertPathExists(fs, "This path should exist", + new Path(testDir2 + "/test4/test3")); + assertPathDoesNotExist(fs, "This path should not exist", + new Path(testDir2 + "/test1/test2/test3")); } @Test @@ -306,7 +313,7 @@ private void testRenameTimeout( when(op.getResult()).thenReturn(http400Op); } else if (renameRequestStatus == HTTP_NOT_FOUND) { // Create the file new. 
- fs.create(destinationPath); + fs.create(destinationPath).close(); when(op.getResult()).thenReturn(http404Op); if (isOldOp) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java index 044c325c8c8dc..f913da7b15ed0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java @@ -76,7 +76,7 @@ public ITestAzureBlobFileSystemRenameUnicode() throws Exception { @Test public void testRenameFileUsingUnicode() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - Path folderPath1 = new Path(srcDir); + Path folderPath1 = path(srcDir); assertMkdirs(fs, folderPath1); assertIsDirectory(fs, folderPath1); Path filePath = new Path(folderPath1 + "/" + filename); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java index 03c3b49d6b253..d55f0ea4f6272 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.junit.Assume.assumeTrue; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; @@ -1297,7 +1298,7 @@ public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); TracingHeaderValidator tracingHeaderValidator = new TracingHeaderValidator( conf.getClientCorrelationId(), fs.getFileSystemId(), @@ -1320,7 +1321,7 @@ public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); - assertTrue(fs.exists(filePath)); + assertPathExists(fs, "This path should exist", filePath); FsPermission oldPermission = fs.getFileStatus(filePath).getPermission(); // default permission for non-namespace enabled account is "777" FsPermission newPermission = new FsPermission("557"); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java index 904e10f01e490..b7b1a3bba2db6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestCustomerProvidedKey.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Optional; import java.util.Random; +import java.util.UUID; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.fs.contract.ContractTestUtils; @@ -107,7 +108,7 @@ public ITestCustomerProvidedKey() throws Exception { @Test public void testReadWithCPK() throws Exception { final 
AzureBlobFileSystem fs = getAbfs(true); - String fileName = "/" + methodName.getMethodName(); + String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -157,7 +158,7 @@ public void testReadWithCPK() throws Exception { @Test public void testReadWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - String fileName = "/" + methodName.getMethodName(); + String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -196,7 +197,7 @@ public void testReadWithoutCPK() throws Exception { @Test public void testAppendWithCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - final String fileName = "/" + methodName.getMethodName(); + final String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append with correct CPK headers @@ -241,7 +242,7 @@ public void testAppendWithCPK() throws Exception { @Test public void testAppendWithoutCPK() throws Exception { final AzureBlobFileSystem fs = getAbfs(false); - final String fileName = "/" + methodName.getMethodName(); + final String fileName = path("/" + methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); // Trying to append without CPK headers @@ -277,7 +278,7 @@ public void testAppendWithoutCPK() throws Exception { @Test public void testSetGetXAttr() throws Exception { final AzureBlobFileSystem fs = getAbfs(true); - String fileName = methodName.getMethodName(); + final String fileName = path(methodName.getMethodName()).toString(); createFileAndGetContent(fs, fileName, FILE_SIZE); String valSent = "testValue"; @@ -325,7 +326,8 @@ public void testCopyBetweenAccounts() throws Exception { AzureBlobFileSystem fs1 = getAbfs(true); int fileSize = FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS; byte[] fileContent = getRandomBytesArray(fileSize); - Path testFilePath = createFileWithContent(fs1, "fs1-file.txt", fileContent); + Path testFilePath = createFileWithContent(fs1, + String.format("fs1-file%s.txt", UUID.randomUUID()), fileContent); // Create fs2 with different CPK Configuration conf = new Configuration(); @@ -340,7 +342,8 @@ public void testCopyBetweenAccounts() throws Exception { AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf); // Read from fs1 and write to fs2, fs1 and fs2 are having different CPK - Path fs2DestFilePath = new Path("fs2-dest-file.txt"); + Path fs2DestFilePath = new Path( + String.format("fs2-dest-file%s.txt", UUID.randomUUID())); FSDataOutputStream ops = fs2.create(fs2DestFilePath); try (FSDataInputStream iStream = fs1.open(testFilePath)) { long totalBytesRead = 0; @@ -408,8 +411,8 @@ public void testListPathWithoutCPK() throws Exception { private void testListPath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - String testDirName = "/" + methodName.getMethodName(); - final Path testPath = new Path(testDirName); + final Path testPath = path("/" + methodName.getMethodName()); + String testDirName = testPath.toString(); fs.mkdirs(testPath); createFileAndGetContent(fs, testDirName + "/aaa", FILE_SIZE); createFileAndGetContent(fs, testDirName + "/bbb", FILE_SIZE); @@ -468,7 +471,8 @@ public void testCreatePathWithoutCPK() throws Exception { private void testCreatePath(final boolean isWithCPK) throws Exception { 
final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -511,7 +515,8 @@ public void testRenamePathWithoutCPK() throws Exception { private void testRenamePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); FileStatus fileStatusBeforeRename = fs @@ -546,15 +551,17 @@ public void testFlushWithoutCPK() throws Exception { private void testFlush(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); - fs.create(new Path(testFileName)); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); + fs.create(new Path(testFileName)).close(); AbfsClient abfsClient = fs.getAbfsClient(); String expectedCPKSha = getCPKSha(fs); byte[] fileContent = getRandomBytesArray(FILE_SIZE); Path testFilePath = new Path(testFileName + "1"); - FSDataOutputStream oStream = fs.create(testFilePath); - oStream.write(fileContent); + try (FSDataOutputStream oStream = fs.create(testFilePath)) { + oStream.write(fileContent); + } // Trying to read with different CPK headers Configuration conf = fs.getConf(); @@ -605,7 +612,8 @@ public void testSetPathPropertiesWithoutCPK() throws Exception { private void testSetPathProperties(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -635,7 +643,8 @@ public void testGetPathStatusFileWithoutCPK() throws Exception { private void testGetPathStatusFile(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -672,7 +681,8 @@ public void testDeletePathWithoutCPK() throws Exception { private void testDeletePath(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); createFileAndGetContent(fs, testFileName, FILE_SIZE); FileStatus[] listStatuses = fs.listStatus(new Path(testFileName)); @@ -702,7 +712,8 @@ public void testSetPermissionWithoutCPK() throws Exception { private void testSetPermission(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); Assume.assumeTrue(fs.getIsNamespaceEnabled(getTestTracingContext(fs, false))); createFileAndGetContent(fs, testFileName, FILE_SIZE); AbfsClient abfsClient = fs.getAbfsClient(); @@ -727,7 +738,8 @@ 
public void testSetAclWithoutCPK() throws Exception { private void testSetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); TracingContext tracingContext = getTestTracingContext(fs, false); Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext)); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -756,7 +768,8 @@ public void testGetAclWithoutCPK() throws Exception { private void testGetAcl(final boolean isWithCPK) throws Exception { final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); TracingContext tracingContext = getTestTracingContext(fs, false); Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext)); createFileAndGetContent(fs, testFileName, FILE_SIZE); @@ -786,8 +799,9 @@ private void testCheckAccess(final boolean isWithCPK) throws Exception { getAuthType() == AuthType.OAuth); final AzureBlobFileSystem fs = getAbfs(isWithCPK); - final String testFileName = "/" + methodName.getMethodName(); - fs.create(new Path(testFileName)); + final String testFileName = path("/" + methodName.getMethodName()) + .toString(); + fs.create(new Path(testFileName)).close(); AbfsClient abfsClient = fs.getAbfsClient(); AbfsRestOperation abfsRestOperation = abfsClient .checkAccess(testFileName, "rwx", getTestTracingContext(fs, false)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java index c478118757107..0ccef2e6ccb34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java @@ -33,16 +33,22 @@ */ public class ITestFileSystemProperties extends AbstractAbfsIntegrationTest { private static final int TEST_DATA = 100; - private static final Path TEST_PATH = new Path("/testfile"); + private static final String TEST_PATH = "/testfile"; public ITestFileSystemProperties() throws Exception { } @Test public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - testWriteOneByteToFileAndEnsureThreadPoolCleanup(); + Path testPath = path(TEST_PATH); + try(FSDataOutputStream stream = fs.create(testPath)) { + stream.write(TEST_DATA); + } + + FileStatus fileStatus = fs.getFileStatus(testPath); + assertEquals(1, fileStatus.getLen()); - try(FSDataInputStream inputStream = fs.open(TEST_PATH, 4 * 1024 * 1024)) { + try(FSDataInputStream inputStream = fs.open(testPath, 4 * 1024 * 1024)) { int i = inputStream.read(); assertEquals(TEST_DATA, i); } @@ -51,11 +57,12 @@ public void testReadWriteBytesToFileAndEnsureThreadPoolCleanup() throws Exceptio @Test public void testWriteOneByteToFileAndEnsureThreadPoolCleanup() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); - try(FSDataOutputStream stream = fs.create(TEST_PATH)) { + Path testPath = path(TEST_PATH); + try(FSDataOutputStream stream = fs.create(testPath)) { stream.write(TEST_DATA); } - FileStatus fileStatus = fs.getFileStatus(TEST_PATH); + FileStatus fileStatus = fs.getFileStatus(testPath); assertEquals(1, 
fileStatus.getLen()); } @@ -78,11 +85,12 @@ public void testBase64PathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable properties = new Hashtable<>(); properties.put("key", "{ value: valueTest }"); - touch(TEST_PATH); + Path testPath = path(TEST_PATH); + touch(testPath); TracingContext tracingContext = getTestTracingContext(fs, true); - fs.getAbfsStore().setPathProperties(TEST_PATH, properties, tracingContext); + fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); Hashtable fetchedProperties = fs.getAbfsStore() - .getPathStatus(TEST_PATH, tracingContext); + .getPathStatus(testPath, tracingContext); assertEquals(properties, fetchedProperties); } @@ -105,11 +113,12 @@ public void testBase64InvalidPathProperties() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Hashtable properties = new Hashtable<>(); properties.put("key", "{ value: valueTest兩 }"); - touch(TEST_PATH); + Path testPath = path(TEST_PATH); + touch(testPath); TracingContext tracingContext = getTestTracingContext(fs, true); - fs.getAbfsStore().setPathProperties(TEST_PATH, properties, tracingContext); + fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); Hashtable fetchedProperties = fs.getAbfsStore() - .getPathStatus(TEST_PATH, tracingContext); + .getPathStatus(testPath, tracingContext); assertEquals(properties, fetchedProperties); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java index 363e1e1cc2a69..0534cdda99fc8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java @@ -62,7 +62,8 @@ public void testListFileStatus() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path path1 = new Path("/testfiles/~12/!008/3/abFsTestfile"); + Path testFiles = path("/testfiles"); + Path path1 = new Path(testFiles + "/~12/!008/3/abFsTestfile"); try(FSDataOutputStream abfsStream = fs.create(path1, true)) { abfsStream.write(ABFS_TEST_CONTEXT.getBytes()); abfsStream.flush(); @@ -70,7 +71,7 @@ public void testListFileStatus() throws Exception { } // create file using wasb - Path path2 = new Path("/testfiles/~12/!008/3/nativeFsTestfile"); + Path path2 = new Path(testFiles + "/~12/!008/3/nativeFsTestfile"); LOG.info("{}", wasb.getUri()); try(FSDataOutputStream nativeFsStream = wasb.create(path2, true)) { nativeFsStream.write(WASB_TEST_CONTEXT.getBytes()); @@ -78,8 +79,8 @@ public void testListFileStatus() throws Exception { nativeFsStream.hsync(); } // list file using abfs and wasb - FileStatus[] abfsFileStatus = fs.listStatus(new Path("/testfiles/~12/!008/3/")); - FileStatus[] nativeFsFileStatus = wasb.listStatus(new Path("/testfiles/~12/!008/3/")); + FileStatus[] abfsFileStatus = fs.listStatus(new Path(testFiles + "/~12/!008/3/")); + FileStatus[] nativeFsFileStatus = wasb.listStatus(new Path(testFiles + "/~12/!008/3/")); assertEquals(2, abfsFileStatus.length); assertEquals(2, nativeFsFileStatus.length); @@ -97,8 +98,9 @@ public void testReadFile() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); + Path testFile = path("/testReadFile"); for (int i = 0; i< 4; i++) { - Path path = new Path("/testReadFile/~12/!008/testfile" + i); + Path path = new Path(testFile + 
"/~12/!008/testfile" + i); final FileSystem createFs = createFileWithAbfs[i] ? abfs : wasb; // Write @@ -137,8 +139,9 @@ public void testDir() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); + Path testDir = path("/testDir"); for (int i = 0; i < 4; i++) { - Path path = new Path("/testDir/t" + i); + Path path = new Path(testDir + "/t" + i); //create final FileSystem createFs = createDirWithAbfs[i] ? abfs : wasb; assertTrue(createFs.mkdirs(path)); @@ -172,11 +175,12 @@ public void testSetWorkingDirectory() throws Exception { NativeAzureFileSystem wasb = getWasbFileSystem(); - Path d1d4 = new Path("/d1/d2/d3/d4"); + Path d1 = path("/d1"); + Path d1d4 = new Path(d1 + "/d2/d3/d4"); assertMkdirs(abfs, d1d4); //set working directory to path1 - Path path1 = new Path("/d1/d2"); + Path path1 = new Path(d1 + "/d2"); wasb.setWorkingDirectory(path1); abfs.setWorkingDirectory(path1); assertEquals(path1, wasb.getWorkingDirectory()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java index fff005114fbe0..431c456ae3daa 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; @@ -32,7 +31,7 @@ * Test create operation. 
*/ public class ITestAbfsOutputStream extends AbstractAbfsIntegrationTest { - private static final Path TEST_FILE_PATH = new Path("testfile"); + private static final String TEST_FILE_PATH = "testfile"; public ITestAbfsOutputStream() throws Exception { super(); @@ -42,7 +41,7 @@ public ITestAbfsOutputStream() throws Exception { public void testMaxRequestsAndQueueCapacityDefaults() throws Exception { Configuration conf = getRawConfiguration(); final AzureBlobFileSystem fs = getFileSystem(conf); - try (FSDataOutputStream out = fs.create(TEST_FILE_PATH)) { + try (FSDataOutputStream out = fs.create(path(TEST_FILE_PATH))) { AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); int maxConcurrentRequests @@ -71,19 +70,18 @@ public void testMaxRequestsAndQueueCapacity() throws Exception { conf.set(ConfigurationKeys.AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, "" + maxRequestsToQueue); final AzureBlobFileSystem fs = getFileSystem(conf); - FSDataOutputStream out = fs.create(TEST_FILE_PATH); - AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); + try (FSDataOutputStream out = fs.create(path(TEST_FILE_PATH))) { + AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); - if (stream.isAppendBlobStream()) { - maxConcurrentRequests = 1; - } + if (stream.isAppendBlobStream()) { + maxConcurrentRequests = 1; + } - Assertions.assertThat(stream.getMaxConcurrentRequestCount()) - .describedAs("maxConcurrentRequests should be " + maxConcurrentRequests) - .isEqualTo(maxConcurrentRequests); - Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()) - .describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) - .isEqualTo(maxRequestsToQueue); + Assertions.assertThat(stream.getMaxConcurrentRequestCount()).describedAs( + "maxConcurrentRequests should be " + maxConcurrentRequests).isEqualTo(maxConcurrentRequests); + Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()).describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) + .isEqualTo(maxRequestsToQueue); + } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index 5e73d8424ba8b..62326e0dbb353 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -583,13 +583,14 @@ public void testDefaultReadaheadQueueDepth() throws Exception { Configuration config = getRawConfiguration(); config.unset(FS_AZURE_READ_AHEAD_QUEUE_DEPTH); AzureBlobFileSystem fs = getFileSystem(config); - Path testFile = new Path("/testFile"); - fs.create(testFile); + Path testFile = path("/testFile"); + fs.create(testFile).close(); FSDataInputStream in = fs.open(testFile); Assertions.assertThat( ((AbfsInputStream) in.getWrappedStream()).getReadAheadQueueDepth()) .describedAs("readahead queue depth should be set to default value 2") .isEqualTo(2); + in.close(); } @@ -646,8 +647,7 @@ public AbfsInputStream testReadAheadConfigs(int readRequestSize, readAheadBlockSize = readRequestSize; } - Path testPath = new Path( - "/testReadAheadConfigs"); + Path testPath = path("/testReadAheadConfigs"); final AzureBlobFileSystem fs = createTestFile(testPath, ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE, config); byte[] byteBuffer = new byte[ONE_MB]; From c1908a641bc66b304cd6c552c018df7ff0c3e3c8 Mon Sep 17 00:00:00 
2001 From: zhihaixu2012 Date: Tue, 27 Jul 2021 19:18:44 -0700 Subject: [PATCH 519/666] HDFS-16111. Add a configuration to RoundRobinVolumeChoosingPolicy to avoid failed volumes at datanodes. (#3175) Change-Id: Iead25812d4073e3980893e3e76f7d2b03b57442a Co-authored-by: Zhihai Xu --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 6 +++ .../RoundRobinVolumeChoosingPolicy.java | 29 ++++++++++- .../src/main/resources/hdfs-default.xml | 11 +++++ .../apache/hadoop/hdfs/MiniDFSCluster.java | 3 ++ .../TestRoundRobinVolumeChoosingPolicy.java | 48 ++++++++++++++++++- 5 files changed, 94 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 66e3c8b71edba..808ecfbe0c44e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1112,6 +1112,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final long DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT = 1024L * 1024L * 1024L * 10L; // 10 GB public static final String DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction"; public static final float DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT = 0.75f; + public static final String + DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY = + "dfs.datanode.round-robin-volume-choosing-policy.additional-available-space"; + public static final long + DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_DEFAULT = + 1024L * 1024L * 1024L; // 1 GB public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java index 0f23e470969a7..fe010b35a4cd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java @@ -17,11 +17,16 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY; + import java.io.IOException; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; @@ -30,7 +35,7 @@ * Use fine-grained locks to synchronize volume choosing. 
*/ public class RoundRobinVolumeChoosingPolicy - implements VolumeChoosingPolicy { + implements VolumeChoosingPolicy, Configurable { public static final Logger LOG = LoggerFactory.getLogger(RoundRobinVolumeChoosingPolicy.class); @@ -41,6 +46,9 @@ public class RoundRobinVolumeChoosingPolicy // syncLocks stores the locks for each storage type. private Object[] syncLocks; + // The required additional available space when choosing a volume. + private long additionalAvailableSpace; + public RoundRobinVolumeChoosingPolicy() { int numStorageTypes = StorageType.values().length; curVolumes = new int[numStorageTypes]; @@ -50,6 +58,23 @@ public RoundRobinVolumeChoosingPolicy() { } } + @Override + public void setConf(Configuration conf) { + additionalAvailableSpace = conf.getLong( + DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY, + DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_DEFAULT); + + LOG.info("Round robin volume choosing policy initialized: " + + DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY + + " = " + additionalAvailableSpace); + } + + @Override + public Configuration getConf() { + // Nothing to do. Only added to fulfill the Configurable contract. + return null; + } + @Override public V chooseVolume(final List volumes, long blockSize, String storageId) throws IOException { @@ -83,7 +108,7 @@ private V chooseVolume(final int curVolumeIndex, final List volumes, final V volume = volumes.get(curVolume); curVolume = (curVolume + 1) % volumes.size(); long availableVolumeSize = volume.getAvailable(); - if (availableVolumeSize > blockSize) { + if (availableVolumeSize > blockSize + additionalAvailableSpace) { curVolumes[curVolumeIndex] = curVolume; return volume; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 58e709f77b4fe..5818ae1812836 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2657,6 +2657,17 @@ + + dfs.datanode.round-robin-volume-choosing-policy.additional-available-space + 1073741824 + + Only used when the dfs.datanode.fsdataset.volume.choosing.policy is set to + org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy. + This setting controls how much additional available space (unit is byte) is needed + when choosing a volume. 
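A rough sketch of how the new setting is consumed (the key name, the Configurable wiring and the ReflectionUtils usage are taken from this patch; the helper name and the 2 GB value are only an example):

    @SuppressWarnings("unchecked")
    static RoundRobinVolumeChoosingPolicy<FsVolumeSpi> newPolicyWithCushion() {
      Configuration conf = new Configuration();
      // Require a 2 GB cushion on top of the block size before a volume is accepted.
      conf.setLong(
          "dfs.datanode.round-robin-volume-choosing-policy.additional-available-space",
          2L * 1024L * 1024L * 1024L);
      // ReflectionUtils.newInstance() calls setConf() because the policy now
      // implements Configurable, so the cushion is applied at construction time.
      return ReflectionUtils.newInstance(RoundRobinVolumeChoosingPolicy.class, conf);
    }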
+ + + dfs.namenode.edits.noeditlogchannelflush false diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index f1b63acd96471..4de55d64063b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -533,6 +533,9 @@ private void initDefaultConfigurations() { DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD); this.storagesPerDatanode = FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs(); + conf.setLong(DFSConfigKeys + .DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY, + 0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java index 44e2a30e5523c..fc99d3c7e54b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java @@ -17,10 +17,13 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY; + import java.io.IOException; import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.ReflectionUtils; @@ -70,7 +73,50 @@ public static void testRR(VolumeChoosingPolicy policy) // Passed. } } - + + // Test the Round-Robin block-volume choosing algorithm with + // additional available space configured. + @Test + @SuppressWarnings("unchecked") + public void testRRWithAdditionalAvailableSpace() throws Exception { + Configuration conf = new Configuration(); + // Set the additional available space needed + conf.setLong( + DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY, + 100); + final RoundRobinVolumeChoosingPolicy policy = + ReflectionUtils.newInstance(RoundRobinVolumeChoosingPolicy.class, conf); + testRRWithAdditionalAvailableSpace(policy); + } + + public static void testRRWithAdditionalAvailableSpace( + VolumeChoosingPolicy policy) throws Exception { + final List volumes = new ArrayList(); + + // First volume, with 100 bytes of space. + volumes.add(Mockito.mock(FsVolumeSpi.class)); + Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L); + + // Second volume, with 200 bytes of space. + volumes.add(Mockito.mock(FsVolumeSpi.class)); + Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L); + + // The first volume has only 100L space, so the policy should choose + // the second one with additional available space configured as 100L. + Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, + null)); + Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, + null)); + + // Fail if no volume can be chosen? + try { + policy.chooseVolume(volumes, 100, null); + Assert.fail(); + } catch (IOException e) { + // Passed. 
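// Illustrative arithmetic for the expected failure above: with the 100-byte
// cushion, neither mocked volume satisfies available > blockSize + cushion
// for a 100-byte block (100 > 200 and 200 > 200 are both false), so
// chooseVolume throws DiskOutOfSpaceException, which is caught here.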
+ } + } + // ChooseVolume should throw DiskOutOfSpaceException // with volume and block sizes in exception message. @Test From 0d9a15be7e7ffcf5db8647a6062e53d2288a8d48 Mon Sep 17 00:00:00 2001 From: 9uapaw Date: Wed, 28 Jul 2021 04:35:20 +0200 Subject: [PATCH 520/666] YARN-9551. TestTimelineClientV2Impl.testSyncCall fails intermittent (#3212) Co-authored-by: Andras Gyori --- .../api/impl/TestTimelineClientV2Impl.java | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java index 6770c07fca4e9..8d437af3076ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java @@ -248,24 +248,36 @@ public void testSyncCall() throws Exception { Thread.sleep(TIME_TO_SLEEP); } printReceivedEntities(); - Assert.assertEquals("TimelineEntities not published as desired", 3, - client.getNumOfTimelineEntitiesPublished()); + + boolean asyncPushesMerged = client.getNumOfTimelineEntitiesPublished() == 3; + int lastPublishIndex = asyncPushesMerged ? 2 : 3; + TimelineEntities firstPublishedEntities = client.getPublishedEntities(0); Assert.assertEquals("sync entities should not be merged with async", 1, firstPublishedEntities.getEntities().size()); - // test before pushing the sync entities asyncs are merged and pushed - TimelineEntities secondPublishedEntities = client.getPublishedEntities(1); - Assert.assertEquals( - "async entities should be merged before publishing sync", 2, - secondPublishedEntities.getEntities().size()); - Assert.assertEquals("Order of Async Events Needs to be FIFO", "2", - secondPublishedEntities.getEntities().get(0).getId()); - Assert.assertEquals("Order of Async Events Needs to be FIFO", "3", - secondPublishedEntities.getEntities().get(1).getId()); + // async push does not guarantee a merge but is FIFO + if (asyncPushesMerged) { + TimelineEntities secondPublishedEntities = client.getPublishedEntities(1); + Assert.assertEquals( + "async entities should be merged before publishing sync", 2, + secondPublishedEntities.getEntities().size()); + Assert.assertEquals("Order of Async Events Needs to be FIFO", "2", + secondPublishedEntities.getEntities().get(0).getId()); + Assert.assertEquals("Order of Async Events Needs to be FIFO", "3", + secondPublishedEntities.getEntities().get(1).getId()); + } else { + TimelineEntities secondAsyncPublish = client.getPublishedEntities(1); + Assert.assertEquals("Order of Async Events Needs to be FIFO", "2", + secondAsyncPublish.getEntities().get(0).getId()); + TimelineEntities thirdAsyncPublish = client.getPublishedEntities(2); + Assert.assertEquals("Order of Async Events Needs to be FIFO", "3", + thirdAsyncPublish.getEntities().get(0).getId()); + } // test the last entity published is sync put - TimelineEntities thirdPublishedEntities = client.getPublishedEntities(2); + TimelineEntities thirdPublishedEntities = + client.getPublishedEntities(lastPublishIndex); Assert.assertEquals("sync entities had to be published at the last", 1, thirdPublishedEntities.getEntities().size()); Assert.assertEquals("Expected last sync Event is not proper", "4", From 
584a531e2e9631e8b26479f748b88ea0da89f65a Mon Sep 17 00:00:00 2001 From: bshashikant Date: Wed, 28 Jul 2021 10:29:00 +0530 Subject: [PATCH 521/666] HDFS-16145. CopyListing fails with FNF exception with snapshot diff. (#3234) --- .../org/apache/hadoop/tools/DistCpSync.java | 64 ++++++- .../apache/hadoop/tools/TestDistCpSync.java | 170 ++++++++++++++++++ 2 files changed, 231 insertions(+), 3 deletions(-) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java index 6cbc936136231..6cb273e7e40bc 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.tools; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -37,6 +38,7 @@ import java.util.EnumMap; import java.util.ArrayList; import java.util.HashSet; +import java.util.Collections; /** * This class provides the basic functionality to sync two FileSystems based on @@ -59,6 +61,9 @@ class DistCpSync { // private EnumMap> diffMap; private DiffInfo[] renameDiffs; + // entries which are marked deleted because of rename to a excluded target + // path + private List deletedByExclusionDiffs; private CopyFilter copyFilter; DistCpSync(DistCpContext context, Configuration conf) { @@ -68,6 +73,11 @@ class DistCpSync { this.copyFilter.initialize(); } + @VisibleForTesting + public void setCopyFilter(CopyFilter copyFilter) { + this.copyFilter = copyFilter; + } + private boolean isRdiff() { return context.shouldUseRdiff(); } @@ -222,7 +232,7 @@ private boolean getAllDiffs() throws IOException { SnapshotDiffReport.DiffType.values()) { diffMap.put(type, new ArrayList()); } - + deletedByExclusionDiffs = null; for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) { // If the entry is the snapshot root, usually a item like "M\t." // in the diff report. We don't need to handle it and cannot handle it, @@ -250,8 +260,13 @@ private boolean getAllDiffs() throws IOException { list.add(new DiffInfo(source, target, dt)); } else { list = diffMap.get(SnapshotDiffReport.DiffType.DELETE); - list.add(new DiffInfo(source, target, - SnapshotDiffReport.DiffType.DELETE)); + DiffInfo info = new DiffInfo(source, target, + SnapshotDiffReport.DiffType.DELETE); + list.add(info); + if (deletedByExclusionDiffs == null) { + deletedByExclusionDiffs = new ArrayList<>(); + } + deletedByExclusionDiffs.add(info); } } else if (copyFilter.shouldCopy(relativeTarget)) { list = diffMap.get(SnapshotDiffReport.DiffType.CREATE); @@ -260,6 +275,9 @@ private boolean getAllDiffs() throws IOException { } } } + if (deletedByExclusionDiffs != null) { + Collections.sort(deletedByExclusionDiffs, DiffInfo.sourceComparator); + } return true; } catch (IOException e) { DistCp.LOG.warn("Failed to compute snapshot diff on " + ssDir, e); @@ -515,6 +533,33 @@ private DiffInfo getRenameItem(DiffInfo diff, DiffInfo[] renameDiffArray) { return null; } + /** + * checks if a parent dir is marked deleted as a part of dir rename happening + * to a path which is excluded by the the filter. 
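* Illustrative example: if /src/dir is renamed to /src/trash/dir and the
* copy filter excludes ".*trash.*", the snapshot diff records /src/dir as a
* DELETE entry; later MODIFY entries for /src/dir itself and MODIFY/CREATE
* entries under it are then skipped when building the copy listing.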
+ * @return true if it's marked deleted + */ + private boolean isParentOrSelfMarkedDeleted(DiffInfo diff, + List deletedDirDiffArray) { + for (DiffInfo item : deletedDirDiffArray) { + if (item.getSource().equals(diff.getSource())) { + // The same path string may appear in: + // 1. both deleted and modified snapshot diff entries. + // 2. both deleted and created snapshot diff entries. + // Case 1 is the about same file/directory, whereas case 2 + // is about two different files/directories. + // We are finding case 1 here, thus we check against DiffType.MODIFY. + if (diff.getType() == SnapshotDiffReport.DiffType.MODIFY) { + return true; + } + } else if (isParentOf(item.getSource(), diff.getSource())) { + // If deleted entry is the parent of diff entry, then both MODIFY and + // CREATE diff entries should be handled. + return true; + } + } + return false; + } + /** * For a given sourcePath, get its real path if it or its parent was renamed. * @@ -567,6 +612,19 @@ public ArrayList prepareDiffListForCopyListing() { renameDiffsList.toArray(new DiffInfo[renameDiffsList.size()]); Arrays.sort(renameDiffArray, DiffInfo.sourceComparator); for (DiffInfo diff : modifyAndCreateDiffs) { + // In cases, where files/dirs got created after a snapshot is taken + // and then the parent dir is moved to location which is excluded by + // the filters. For example, files/dirs created inside a dir in an + // encryption zone in HDFS. When the parent dir gets deleted, it will be + // moved to trash within which is inside the encryption zone itself. + // If the trash path gets excluded by filters , the dir will be marked + // for DELETE for the target location. All the subsequent creates should + // for such dirs should be ignored as well as the modify operation + // on the dir itself. + if (deletedByExclusionDiffs != null && isParentOrSelfMarkedDeleted(diff, + deletedByExclusionDiffs)) { + continue; + } DiffInfo renameItem = getRenameItem(diff, renameDiffArray); if (renameItem == null) { diff.setTarget(diff.getSource()); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java index 220caaefdb43c..61c72746add7e 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java @@ -53,6 +53,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.regex.Pattern; public class TestDistCpSync { private MiniDFSCluster cluster; @@ -292,6 +293,175 @@ public void testSync() throws Exception { verifyCopy(dfs.getFileStatus(spath), dfs.getFileStatus(target), false); } + /** + * Test the basic functionality. 
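* Covers a directory that is deleted and re-created between the two
* snapshots; the file created under the re-created directory must still be
* synced to the target.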
+ */ + @Test + public void testSync1() throws Exception { + Path srcpath = new Path(source, "encz-mock"); + dfs.mkdirs(srcpath); + dfs.mkdirs(new Path(source, "encz-mock/datedir")); + enableAndCreateFirstSnapshot(); + + // before sync, make some further changes on source + DFSTestUtil.createFile(dfs, new Path(source, "encz-mock/datedir/file1"), + BLOCK_SIZE, DATA_NUM, 0); + dfs.delete(new Path(source, "encz-mock/datedir"), true); + dfs.mkdirs(new Path(source, "encz-mock/datedir")); + DFSTestUtil.createFile(dfs, new Path(source, "encz-mock/datedir/file2"), + BLOCK_SIZE, DATA_NUM, 0); + dfs.createSnapshot(source, "s2"); + Assert.assertTrue(dfs.exists(new Path(source, "encz-mock/datedir/file2"))); + + SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2"); + System.out.println(report); + + DistCpSync distCpSync = new DistCpSync(context, conf); + + // do the sync + Assert.assertTrue(distCpSync.sync()); + // make sure the source path has been updated to the snapshot path + final Path spath = new Path(source, + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2"); + Assert.assertEquals(spath, context.getSourcePaths().get(0)); + + // build copy listing + final Path listingPath = new Path("/tmp/META/fileList.seq"); + CopyListing listing = + new SimpleCopyListing(conf, new Credentials(), distCpSync); + listing.buildListing(listingPath, context); + + Map copyListing = getListing(listingPath); + CopyMapper copyMapper = new CopyMapper(); + StubContext stubContext = new StubContext(conf, null, 0); + Mapper.Context mapContext = + stubContext.getContext(); + copyMapper.setup(mapContext); + for (Map.Entry entry : copyListing + .entrySet()) { + copyMapper.map(entry.getKey(), entry.getValue(), mapContext); + } + Assert.assertTrue(dfs.exists(new Path(target, "encz-mock/datedir/file2"))); + // verify the source and target now has the same structure + verifyCopy(dfs.getFileStatus(spath), dfs.getFileStatus(target), false); + } + + /** + * Test the basic functionality. 
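* Covers a directory that is renamed into another directory ("trash")
* between the two snapshots, with no copy filter configured; both the
* renamed file and the newly created one must appear on the target.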
+ */ + @Test + public void testSyncNew() throws Exception { + Path srcpath = new Path(source, "encz-mock"); + dfs.mkdirs(srcpath); + dfs.mkdirs(new Path(source, "encz-mock/datedir")); + dfs.mkdirs(new Path(source, "trash")); + enableAndCreateFirstSnapshot(); + + // before sync, make some further changes on source + DFSTestUtil.createFile(dfs, new Path(source, "encz-mock/datedir/file1"), + BLOCK_SIZE, DATA_NUM, 0); + dfs.rename(new Path(source, "encz-mock/datedir"), + new Path(source, "trash")); + dfs.mkdirs(new Path(source, "encz-mock/datedir")); + DFSTestUtil.createFile(dfs, new Path(source, "encz-mock/datedir/file2"), + BLOCK_SIZE, DATA_NUM, 0); + dfs.createSnapshot(source, "s2"); + Assert.assertTrue(dfs.exists(new Path(source, "encz-mock/datedir/file2"))); + + SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2"); + System.out.println(report); + + DistCpSync distCpSync = new DistCpSync(context, conf); + + // do the sync + Assert.assertTrue(distCpSync.sync()); + // make sure the source path has been updated to the snapshot path + final Path spath = new Path(source, + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2"); + Assert.assertEquals(spath, context.getSourcePaths().get(0)); + + // build copy listing + final Path listingPath = new Path("/tmp/META/fileList.seq"); + CopyListing listing = + new SimpleCopyListing(conf, new Credentials(), distCpSync); + listing.buildListing(listingPath, context); + + Map copyListing = getListing(listingPath); + CopyMapper copyMapper = new CopyMapper(); + StubContext stubContext = new StubContext(conf, null, 0); + Mapper.Context mapContext = + stubContext.getContext(); + copyMapper.setup(mapContext); + for (Map.Entry entry : copyListing + .entrySet()) { + copyMapper.map(entry.getKey(), entry.getValue(), mapContext); + } + Assert.assertTrue(dfs.exists(new Path(target, "encz-mock/datedir/file2"))); + Assert.assertTrue(dfs.exists(new Path(target, "trash/datedir/file1"))); + // verify the source and target now has the same structure + verifyCopy(dfs.getFileStatus(spath), dfs.getFileStatus(target), false); + } + + /** + * Test the basic functionality. 
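* Covers the same rename into "trash", but with a RegexCopyFilter that
* excludes ".*trash.*"; files under the excluded rename target must not be
* copied, while the re-created directory still syncs correctly.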
+ */ + @Test + public void testSyncWithFilters() throws Exception { + Path srcpath = new Path(source, "encz-mock"); + dfs.mkdirs(srcpath); + dfs.mkdirs(new Path(source, "encz-mock/datedir")); + dfs.mkdirs(new Path(source, "trash")); + enableAndCreateFirstSnapshot(); + + // before sync, make some further changes on source + DFSTestUtil.createFile(dfs, new Path(source, "encz-mock/datedir/file1"), + BLOCK_SIZE, DATA_NUM, 0); + dfs.rename(new Path(source, "encz-mock/datedir"), + new Path(source, "trash")); + dfs.mkdirs(new Path(source, "encz-mock/datedir")); + DFSTestUtil.createFile(dfs, new Path(source, "encz-mock/datedir/file2"), + BLOCK_SIZE, DATA_NUM, 0); + dfs.createSnapshot(source, "s2"); + Assert.assertTrue(dfs.exists(new Path(source, "encz-mock/datedir/file2"))); + + SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2"); + System.out.println(report); + List filters = new ArrayList<>(); + filters.add(Pattern.compile(".*trash.*")); + RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile"); + regexCopyFilter.setFilters(filters); + + DistCpSync distCpSync = new DistCpSync(context, conf); + distCpSync.setCopyFilter(regexCopyFilter); + + // do the sync + Assert.assertTrue(distCpSync.sync()); + // make sure the source path has been updated to the snapshot path + final Path spath = new Path(source, + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2"); + Assert.assertEquals(spath, context.getSourcePaths().get(0)); + + // build copy listing + final Path listingPath = new Path("/tmp/META/fileList.seq"); + CopyListing listing = + new SimpleCopyListing(conf, new Credentials(), distCpSync); + listing.buildListing(listingPath, context); + + Map copyListing = getListing(listingPath); + CopyMapper copyMapper = new CopyMapper(); + StubContext stubContext = new StubContext(conf, null, 0); + Mapper.Context mapContext = + stubContext.getContext(); + copyMapper.setup(mapContext); + for (Map.Entry entry : copyListing + .entrySet()) { + copyMapper.map(entry.getKey(), entry.getValue(), mapContext); + } + Assert.assertTrue(dfs.exists(new Path(target, "encz-mock/datedir/file2"))); + Assert.assertFalse(dfs.exists(new Path(target, "encz-mock/datedir/file1"))); + Assert.assertFalse(dfs.exists(new Path(target, "trash/datedir/file1"))); + } + private Map getListing(Path listingPath) throws Exception { SequenceFile.Reader reader = new SequenceFile.Reader(conf, From 66ef9d6038ac5ba8d2b2358d5172ad3ffb888803 Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Wed, 28 Jul 2021 18:18:04 +0800 Subject: [PATCH 522/666] HDFS-16137.Improve the comments related to FairCallQueue#queues. 
(#3226) Co-authored-by: zhujianghua Reviewed-by: Viraj Jasani --- .../main/java/org/apache/hadoop/ipc/FairCallQueue.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java index 6db9348980159..84614a874d6e3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java @@ -25,6 +25,7 @@ import java.util.Iterator; import java.util.AbstractQueue; import java.util.HashMap; +import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.Semaphore; @@ -58,8 +59,12 @@ public class FairCallQueue extends AbstractQueue public static final Logger LOG = LoggerFactory.getLogger(FairCallQueue.class); - /* The queues */ - private final ArrayList> queues; + /** + * Save the queue data of multiple priority strategies. + * Usually the number of queue data and priority strategies saved + * is the same. + */ + private final List> queues; /* Track available permits for scheduled objects. All methods that will * mutate a subqueue must acquire or release a permit on the semaphore. From ac7f8733c2307e32ba677e7954227a254a63b96e Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 28 Jul 2021 14:49:10 +0200 Subject: [PATCH 523/666] YARN-10727. ParentQueue does not validate the queue on removal. Contributed by Andras Gyori --- .../scheduler/capacity/ParentQueue.java | 15 +++++++ ...CapacitySchedulerNewQueueAutoCreation.java | 45 +++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 8ff4d911a541b..e1307d7bd3f26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -581,6 +581,21 @@ public void removeChildQueue(CSQueue queue) throws SchedulerDynamicEditException { writeLock.lock(); try { + if (!(queue instanceof AbstractCSQueue) || + !((AbstractCSQueue) queue).isDynamicQueue()) { + throw new SchedulerDynamicEditException("Queue " + getQueuePath() + + " can not remove " + queue.getQueuePath() + + " because it is not a dynamic queue"); + } + + // We need to check if the parent of the child queue is exactly this + // ParentQueue object + if (queue.getParent() != this) { + throw new SchedulerDynamicEditException("Queue " + getQueuePath() + + " can not remove " + queue.getQueuePath() + + " because it has a different parent queue"); + } + // Now we can do remove and update this.childQueues.remove(queue); this.scheduler.getCapacitySchedulerQueueManager() diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java index 4229676e60b61..053a1354d1943 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java @@ -1162,6 +1162,51 @@ public void testRemoveDanglingAutoCreatedQueuesOnReinit() throws Exception { "when its dynamic parent is removed", bAutoLeaf); } + @Test + public void testParentQueueDynamicChildRemoval() throws Exception { + startScheduler(); + + createQueue("root.a.a-auto"); + createQueue("root.a.a-auto"); + AbstractCSQueue aAuto = (AbstractCSQueue) cs. + getQueue("root.a.a-auto"); + Assert.assertTrue(aAuto.isDynamicQueue()); + ParentQueue a = (ParentQueue) cs. + getQueue("root.a"); + createQueue("root.e.e1-auto"); + AbstractCSQueue eAuto = (AbstractCSQueue) cs. + getQueue("root.e.e1-auto"); + Assert.assertTrue(eAuto.isDynamicQueue()); + ParentQueue e = (ParentQueue) cs. + getQueue("root.e"); + + // Try to remove a static child queue + try { + a.removeChildQueue(cs.getQueue("root.a.a1")); + Assert.fail("root.a.a1 is a static queue and should not be removed at " + + "runtime"); + } catch (SchedulerDynamicEditException ignored) { + } + + // Try to remove a dynamic queue with a different parent + try { + a.removeChildQueue(eAuto); + Assert.fail("root.a should not be able to remove root.e.e1-auto"); + } catch (SchedulerDynamicEditException ignored) { + } + + a.removeChildQueue(aAuto); + e.removeChildQueue(eAuto); + + aAuto = (AbstractCSQueue) cs. + getQueue("root.a.a-auto"); + eAuto = (AbstractCSQueue) cs. + getQueue("root.e.e1-auto"); + + Assert.assertNull("root.a.a-auto should have been removed", aAuto); + Assert.assertNull("root.e.e1-auto should have been removed", eAuto); + } + protected LeafQueue createQueue(String queuePath) throws YarnException, IOException { return autoQueueHandler.createQueue( From d36008986907d1db5cb014d236ca38751525aa60 Mon Sep 17 00:00:00 2001 From: S O'Donnell Date: Wed, 28 Jul 2021 14:49:23 +0100 Subject: [PATCH 524/666] HDFS-16144. Revert HDFS-15372 (Files in snapshots no longer see attribute provider permissions). 
Contributed by Stephen O'Donnell --- .../hdfs/server/namenode/FSDirectory.java | 18 +- .../server/namenode/FSPermissionChecker.java | 44 ++--- .../hdfs/server/namenode/INodesInPath.java | 21 --- .../namenode/TestINodeAttributeProvider.java | 155 ++++++------------ 4 files changed, 71 insertions(+), 167 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 497aa84e767a5..9ff54e6f2b752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2067,23 +2067,7 @@ INodeAttributes getAttributes(INodesInPath iip) // first empty component for the root. however file status // related calls are expected to strip out the root component according // to TestINodeAttributeProvider. - // Due to HDFS-15372 the attribute provider should received the resolved - // snapshot path. Ie, rather than seeing /d/.snapshot/sn/data it should - // see /d/data. However, for the path /d/.snapshot/sn it should see this - // full path. If the current inode is the snapshot name, it always has the - // same ID as its parent inode, so we can use that to check if it is the - // path which needs handled specially. - byte[][] components; - INodeDirectory parent = node.getParent(); - if (iip.isSnapshot() - && parent != null && parent.getId() != node.getId()) { - // For snapshot paths, we always user node.getPathComponents so the - // snapshot path is resolved to the real path, unless the last component - // is the snapshot name root directory. 
- components = node.getPathComponents(); - } else { - components = iip.getPathComponents(); - } + byte[][] components = iip.getPathComponents(); components = Arrays.copyOfRange(components, 1, components.length); nodeAttrs = ap.getAttributes(components, nodeAttrs); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index e8e292761d40c..324cd5d441201 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Stack; @@ -265,7 +264,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length]; final byte[][] components = inodesInPath.getPathComponents(); for (int i = 0; i < inodes.length && inodes[i] != null; i++) { - inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId); + inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId); } String path = inodesInPath.getPath(); @@ -315,7 +314,8 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, void checkPermission(INode inode, int snapshotId, FsAction access) throws AccessControlException { byte[][] pathComponents = inode.getPathComponents(); - INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId); + INodeAttributes nodeAttributes = getINodeAttrs(pathComponents, + pathComponents.length - 1, inode, snapshotId); try { INodeAttributes[] iNodeAttr = {nodeAttributes}; AccessControlEnforcer enforcer = getAccessControlEnforcer(); @@ -424,31 +424,23 @@ public void checkPermissionWithContext( authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir()); } - private INodeAttributes getINodeAttrs(INode inode, int snapshotId) { + private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx, + INode inode, int snapshotId) { INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId); - /** - * This logic is similar to {@link FSDirectory#getAttributes()} and it - * ensures that the attribute provider sees snapshot paths resolved to their - * original location. This means the attributeProvider can apply permissions - * to the snapshot paths in the same was as the live paths. See HDFS-15372. - */ if (getAttributesProvider() != null) { + String[] elements = new String[pathIdx + 1]; /** - * If we have an inode representing a path like /d/.snapshot/snap1 - * then calling inode.getPathComponents returns [null, d, snap1]. If we - * call inode.getFullPathName() it will return /d/.snapshot/snap1. For - * this special path (snapshot root) the attribute provider should see: - * - * [null, d, .snapshot/snap1] - * - * Using IIP.resolveFromRoot, it will take the inode fullPathName and - * construct an IIP object that give the correct components as above. + * {@link INode#getPathComponents(String)} returns a null component + * for the root only path "/". Assign an empty string if so. 
*/ - INodesInPath iip = INodesInPath.resolveFromRoot(inode); - byte[][] components = iip.getPathComponents(); - components = Arrays.copyOfRange(components, 1, components.length); - inodeAttrs = getAttributesProvider() - .getAttributes(components, inodeAttrs); + if (pathByNameArr.length == 1 && pathByNameArr[0] == null) { + elements[0] = ""; + } else { + for (int i = 0; i < elements.length; i++) { + elements[i] = DFSUtil.bytes2String(pathByNameArr[i]); + } + } + inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs); } return inodeAttrs; } @@ -504,7 +496,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (!(cList.isEmpty() && ignoreEmptyDir)) { //TODO have to figure this out with inodeattribute provider INodeAttributes inodeAttr = - getINodeAttrs(d, snapshotId); + getINodeAttrs(components, pathIdx, d, snapshotId); if (!hasPermission(inodeAttr, access)) { throw new AccessControlException( toAccessControlString(inodeAttr, d.getFullPathName(), access)); @@ -522,7 +514,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (inodeAttr.getFsPermission().getStickyBit()) { for (INode child : cList) { INodeAttributes childInodeAttr = - getINodeAttrs(child, snapshotId); + getINodeAttrs(components, pathIdx, child, snapshotId); if (isStickyBitViolated(inodeAttr, childInodeAttr)) { List allComponentList = new ArrayList<>(); for (int i = 0; i <= pathIdx; ++i) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index 8a150f0630f6b..c2cdd48d4952a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -135,27 +135,6 @@ static INodesInPath resolve(final INodeDirectory startingDir, return resolve(startingDir, components, false); } - /** - * Retrieves the existing INodes from a path, starting at the root directory. - * The root directory is located by following the parent link in the inode - * recursively until the final root inode is found. - * The inodes returned will depend upon the output of inode.getFullPathName(). 
- * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to: - * [null, data, .snapshot/snap1] - * For a file in the snapshot, as inode.getFullPathName resolves the snapshot - * information, the returned inodes for a path like /data/.snapshot/snap1/d1 - * would be: - * [null, data, d1] - * @param inode the {@link INode} to be resolved - * @return INodesInPath - */ - static INodesInPath resolveFromRoot(INode inode) { - INode[] inodes = getINodes(inode); - byte[][] paths = INode.getPathComponents(inode.getFullPathName()); - INodeDirectory rootDir = inodes[0].asDirectory(); - return resolve(rootDir, paths); - } - static INodesInPath resolve(final INodeDirectory startingDir, byte[][] components, final boolean isRaw) { Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index 512d102983596..776a1981ce4e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.conf.Configuration; @@ -33,9 +34,9 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Lists; @@ -81,7 +82,6 @@ public void checkPermission(String fsOwner, String supergroup, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access); - CALLED.add("checkPermission|" + path); } @Override @@ -89,13 +89,13 @@ public void checkPermissionWithContext( AuthorizationContext authzContext) throws AccessControlException { if (authzContext.getAncestorIndex() > 1 && authzContext.getInodes()[1].getLocalName().equals("user") - && authzContext.getInodes()[2].getLocalName().equals("acl")) { + && authzContext.getInodes()[2].getLocalName().equals("acl") + || runPermissionCheck) { this.ace.checkPermissionWithContext(authzContext); } CALLED.add("checkPermission|" + authzContext.getAncestorAccess() + "|" + authzContext.getParentAccess() + "|" + authzContext .getAccess()); - CALLED.add("checkPermission|" + authzContext.getPath()); } } @@ -112,12 +112,7 @@ public void stop() { @Override public INodeAttributes getAttributes(String[] pathElements, final INodeAttributes inode) { - String fullPath = String.join("/", pathElements); - if (!fullPath.startsWith("/")) { - fullPath = "/" + fullPath; - } CALLED.add("getAttributes"); - CALLED.add("getAttributes|"+fullPath); final boolean useDefault = useDefault(pathElements); final boolean useNullAcl = useNullAclFeature(pathElements); return new INodeAttributes() { @@ -495,109 +490,63 @@ public Void run() throws Exception { } @Test - // 
HDFS-15372 - Attribute provider should not see the snapshot path as it - // should be resolved into the original path name before it hits the provider. - public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception { + // See HDFS-16132 where an issue was reported after HDFS-15372. The sequence + // of operations here causes that change to break and the test fails with: + // org.apache.hadoop.ipc.RemoteException(java.lang.AssertionError): + // Absolute path required, but got 'foo' + // at org.apache.hadoop.hdfs.server.namenode.INode.checkAbsolutePath + // (INode.java:838) + // at org.apache.hadoop.hdfs.server.namenode.INode.getPathComponents + // (INode.java:813) + // After reverting HDFS-15372 the test passes, so including this test in the + // revert for future reference. + public void testAttrProviderWorksCorrectlyOnRenamedSnapshotPaths() + throws Exception { + runPermissionCheck = true; FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); DistributedFileSystem hdfs = miniDFS.getFileSystem(); - final Path userPath = new Path("/user"); - final Path authz = new Path("/user/authz"); - final Path authzChild = new Path("/user/authz/child2"); - - fs.mkdirs(userPath); - fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION)); - fs.mkdirs(authz); - hdfs.allowSnapshot(userPath); - fs.setPermission(authz, new FsPermission(HDFS_PERMISSION)); - fs.mkdirs(authzChild); - fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION)); - fs.createSnapshot(userPath, "snapshot_1"); - UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1", - new String[]{"g1"}); - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - final Path snapChild = - new Path("/user/.snapshot/snapshot_1/authz/child2"); - // Run various methods on the path to access the attributes etc. - fs.getAclStatus(snapChild); - fs.getContentSummary(snapChild); - fs.getFileStatus(snapChild); - Assert.assertFalse(CALLED.contains("getAttributes|" + - snapChild.toString())); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2")); - // The snapshot path should be seen by the permission checker, but when - // it checks access, the paths will be resolved so the attributeProvider - // only sees the resolved path. - Assert.assertTrue( - CALLED.contains("checkPermission|" + snapChild.toString())); - CALLED.clear(); - fs.getAclStatus(new Path("/")); - Assert.assertTrue(CALLED.contains("checkPermission|/")); - Assert.assertTrue(CALLED.contains("getAttributes|/")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user")); - Assert.assertTrue(CALLED.contains("checkPermission|/user")); - Assert.assertTrue(CALLED.contains("getAttributes|/user")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot")); - Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); - // attribute provider never sees the .snapshot path directly. 
- Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED - .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/authz")); - Assert.assertTrue(CALLED.contains("checkPermission|/user/authz")); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); - return null; - } - }); - // Delete the files / folders covered by the snapshot, then re-check they - // are all readable correctly. - fs.delete(authz, true); + final Path parent = new Path("/user"); + hdfs.mkdirs(parent); + fs.setPermission(parent, new FsPermission(HDFS_PERMISSION)); + final Path sub1 = new Path(parent, "sub1"); + final Path sub1foo = new Path(sub1, "foo"); + hdfs.mkdirs(sub1); + hdfs.mkdirs(sub1foo); + Path f = new Path(sub1foo, "file0"); + DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0); + hdfs.allowSnapshot(parent); + hdfs.createSnapshot(parent, "s0"); + + f = new Path(sub1foo, "file1"); + DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0); + f = new Path(sub1foo, "file2"); + DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0); + + final Path sub2 = new Path(parent, "sub2"); + hdfs.mkdirs(sub2); + final Path sub2foo = new Path(sub2, "foo"); + // mv /parent/sub1/foo to /parent/sub2/foo + hdfs.rename(sub1foo, sub2foo); + + hdfs.createSnapshot(parent, "s1"); + hdfs.createSnapshot(parent, "s2"); + + final Path sub3 = new Path(parent, "sub3"); + hdfs.mkdirs(sub3); + // mv /parent/sub2/foo to /parent/sub3/foo + hdfs.rename(sub2foo, sub3); + + hdfs.delete(sub3, true); + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting("u1", new String[] {"g1"}); ugi.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); - + ((DistributedFileSystem)fs).getSnapshotDiffReport(parent, "s1", "s2"); CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot")); - Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); - // attribute provider never sees the .snapshot path directly. - Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); - Assert.assertTrue( - CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); - - CALLED.clear(); - fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED - .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); - Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); - return null; } }); - } } From 4d4da37a44a7223eaa7ce0ea95a5246031780b06 Mon Sep 17 00:00:00 2001 From: Mehakmeet Singh Date: Wed, 28 Jul 2021 20:04:43 +0530 Subject: [PATCH 525/666] HADOOP-17817. 
S3A to raise IOE if both S3-CSE and S3Guard enabled (#3239) Contributed by Mehakmeet Singh --- .../apache/hadoop/fs/s3a/S3AFileSystem.java | 4 +++ .../markdown/tools/hadoop-aws/encryption.md | 1 + .../tools/hadoop-aws/troubleshooting_s3a.md | 25 +++++++++++++++++++ 3 files changed, 30 insertions(+) diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 75f67a09f6483..255254983a7c4 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -539,6 +539,10 @@ public void initialize(URI name, Configuration originalConf) if (hasMetadataStore()) { LOG.debug("Using metadata store {}, authoritative store={}, authoritative path={}", getMetadataStore(), allowAuthoritativeMetadataStore, allowAuthoritativePaths); + if (isCSEEnabled) { + throw new PathIOException(uri.toString(), "S3-CSE cannot be used " + + "with S3Guard"); + } } // LOG if S3Guard is disabled on the warn level set in config diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md index 888ed8e211a9f..5fa6a3096b8a2 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md @@ -601,6 +601,7 @@ clients where S3-CSE has not been enabled. ### Limitations +- S3Guard is not supported with S3-CSE. - Performance will be reduced. All encrypt/decrypt is now being done on the client. - Writing files may be slower, as only a single block can be encrypted and diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 6cdb492d885fc..33dd165499203 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -1435,6 +1435,31 @@ The user trying to use the KMS Key ID should have the right permissions to acces If not, then add permission(or IAM role) in "Key users" section by selecting the AWS-KMS CMK Key on AWS console. +### S3-CSE cannot be used with S3Guard + +S3-CSE not supported for S3Guard enabled buckets. 
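The check added to `S3AFileSystem.initialize()` in this patch fails fast at filesystem creation time, producing the stack trace shown below. As a hedged sketch of the workaround when S3-CSE is required (assuming S3Guard is enabled through the usual `fs.s3a.metadatastore.impl` setting, which can be pointed at the null store):

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DisableS3GuardSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: switch S3Guard off (null metadata store) for the
    // bucket so that enabling S3-CSE does not trip the fail-fast check
    // added to S3AFileSystem.initialize() in this patch.
    Configuration conf = new Configuration();
    conf.set("fs.s3a.metadatastore.impl",
        "org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore");
    FileSystem fs = new Path("s3a://test-bucket/").getFileSystem(conf);
    System.out.println("Initialized " + fs.getUri());
  }
}
```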
+``` +org.apache.hadoop.fs.PathIOException: `s3a://test-bucket': S3-CSE cannot be used with S3Guard + at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:543) + at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3460) + at org.apache.hadoop.fs.FileSystem.access$300(FileSystem.java:172) + at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3565) + at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3512) + at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:539) + at org.apache.hadoop.fs.Path.getFileSystem(Path.java:366) + at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:342) + at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:252) + at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:235) + at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:105) + at org.apache.hadoop.fs.shell.Command.run(Command.java:179) + at org.apache.hadoop.fs.FsShell.run(FsShell.java:327) + at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:81) + at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:95) + at org.apache.hadoop.fs.FsShell.main(FsShell.java:390) +``` +If you want to use S3Guard then disable S3-CSE or disable S3Guard if you want +to use S3-CSE. + ### Message appears in logs "Not all bytes were read from the S3ObjectInputStream" From 60cf4ba1a4412362d062045992d6d03334686855 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 28 Jul 2021 16:50:14 +0200 Subject: [PATCH 526/666] YARN-10790. CS Flexible AQC: Add separate parent and leaf template property. Contributed by Andras Gyori --- .../capacity/AutoCreatedQueueTemplate.java | 102 ++++++++++++++++-- .../webapp/dao/CapacitySchedulerInfo.java | 14 ++- .../dao/CapacitySchedulerQueueInfo.java | 16 ++- .../helper/CapacitySchedulerInfoHelper.java | 5 +- .../TestAutoCreatedQueueTemplate.java | 41 +++++++ .../TestRMWebServicesCapacitySched.java | 6 +- .../TestRMWebServicesForCSWithPartitions.java | 2 +- 7 files changed, 164 insertions(+), 22 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java index 203ec4dcd6f0d..cb0f7891f6b7e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java @@ -30,6 +30,7 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AUTO_QUEUE_CREATION_V2_PREFIX; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.ROOT; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.getQueuePrefix; /** * A handler for storing and setting auto created queue template settings. 
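The three template prefixes this class manages can be illustrated with a short configuration sketch. This is a hedged illustration only: the full key strings are assumed from the constants in this class and from the new `TestAutoCreatedQueueTemplate` cases later in the patch (in particular, `AUTO_QUEUE_CREATION_V2_PREFIX` is assumed to expand to `auto-queue-creation-v2.`); real deployments should take the names from `CapacitySchedulerConfiguration`.

```
import org.apache.hadoop.conf.Configuration;

public class AqcTemplateSketch {
  public static void main(String[] args) {
    // Sketch: a common template entry plus leaf-only overrides under the
    // parent queue root.a. Dynamic children of root.a inherit capacity "2w"
    // from the common template; dynamic leaf queues instead get "10w" and
    // the node label expression from the leaf-template. A parent-template
    // entry would apply to dynamic parent queues only, in the same way.
    Configuration conf = new Configuration();
    String prefix = "yarn.scheduler.capacity.root.a.auto-queue-creation-v2.";
    conf.set(prefix + "template.capacity", "2w");
    conf.set(prefix + "leaf-template.capacity", "10w");
    conf.set(prefix + "leaf-template.default-node-label-expression", "test");
  }
}
```

As the class comments note, explicitly configured child properties and the queue-type-specific templates both take precedence over the common template.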
@@ -37,10 +38,17 @@ public class AutoCreatedQueueTemplate { public static final String AUTO_QUEUE_TEMPLATE_PREFIX = AUTO_QUEUE_CREATION_V2_PREFIX + "template."; + public static final String AUTO_QUEUE_LEAF_TEMPLATE_PREFIX = + AUTO_QUEUE_CREATION_V2_PREFIX + "leaf-template."; + public static final String AUTO_QUEUE_PARENT_TEMPLATE_PREFIX = + AUTO_QUEUE_CREATION_V2_PREFIX + "parent-template."; + private static final String WILDCARD_QUEUE = "*"; private static final int MAX_WILDCARD_LEVEL = 1; private final Map templateProperties = new HashMap<>(); + private final Map leafOnlyProperties = new HashMap<>(); + private final Map parentOnlyProperties = new HashMap<>(); public AutoCreatedQueueTemplate(Configuration configuration, String queuePath) { @@ -54,7 +62,7 @@ public static String getAutoQueueTemplatePrefix(String queue) { } /** - * Get the template properties attached to a parent queue. + * Get the common template properties specified for a parent queue. * @return template property names and values */ public Map getTemplateProperties() { @@ -62,13 +70,44 @@ public Map getTemplateProperties() { } /** - * Sets the configuration properties of a child queue based on its parent + * Get the leaf specific template properties specified for a parent queue. + * @return template property names and values + */ + public Map getLeafOnlyProperties() { + return leafOnlyProperties; + } + + /** + * Get the parent specific template properties specified for a parent queue. + * @return template property names and values + */ + public Map getParentOnlyProperties() { + return parentOnlyProperties; + } + + /** + * Sets the common template properties and parent specific template + * properties of a child queue based on its parent template settings. + * @param conf configuration to set + * @param childQueuePath child queue path used for prefixing the properties + */ + public void setTemplateEntriesForChild(Configuration conf, + String childQueuePath) { + setTemplateEntriesForChild(conf, childQueuePath, false); + } + + /** + * Sets the common template properties and leaf or parent specific template + * properties of a child queue based on its parent template settings. * template settings. 
* @param conf configuration to set + * @param isLeaf whether to include leaf specific template properties, or + * parent specific template properties * @param childQueuePath child queue path used for prefixing the properties */ public void setTemplateEntriesForChild(Configuration conf, - String childQueuePath) { + String childQueuePath, + boolean isLeaf) { if (childQueuePath.equals(ROOT)) { return; } @@ -77,7 +116,14 @@ public void setTemplateEntriesForChild(Configuration conf, Set alreadySetProps = conf.getPropsWithPrefix( CapacitySchedulerConfiguration.getQueuePrefix(childQueuePath)).keySet(); - for (Map.Entry entry : templateProperties.entrySet()) { + // Check template properties only set for leaf or parent queues + Map queueTypeSpecificTemplates = parentOnlyProperties; + if (isLeaf) { + queueTypeSpecificTemplates = leafOnlyProperties; + } + + for (Map.Entry entry : + queueTypeSpecificTemplates.entrySet()) { // Do not overwrite explicitly configured properties if (alreadySetProps.contains(entry.getKey())) { continue; @@ -85,6 +131,17 @@ public void setTemplateEntriesForChild(Configuration conf, conf.set(CapacitySchedulerConfiguration.getQueuePrefix( childQueuePath) + entry.getKey(), entry.getValue()); } + + for (Map.Entry entry : templateProperties.entrySet()) { + // Do not overwrite explicitly configured properties or properties set + // by queue type specific templates (parent-template and leaf-template) + if (alreadySetProps.contains(entry.getKey()) + || queueTypeSpecificTemplates.containsKey(entry.getKey())) { + continue; + } + conf.set(CapacitySchedulerConfiguration.getQueuePrefix( + childQueuePath) + entry.getKey(), entry.getValue()); + } } /** @@ -118,16 +175,15 @@ private void setTemplateConfigEntries(Configuration configuration, // Collect all template entries while (wildcardLevel <= supportedWildcardLevel) { - // Get all config entries with the specified prefix String templateQueuePath = String.join(".", queuePathParts); // Get all configuration entries with - // .auto-queue-creation-v2.template prefix - Map props = configuration.getPropsWithPrefix( - getAutoQueueTemplatePrefix(templateQueuePath)); + // yarn.scheduler.capacity. 
prefix + Map queueProps = configuration.getPropsWithPrefix( + getQueuePrefix(templateQueuePath)); - for (Map.Entry entry : props.entrySet()) { - // If an entry is already present, it had a higher precedence - templateProperties.putIfAbsent(entry.getKey(), entry.getValue()); + // Store template, parent-template and leaf-template properties + for (Map.Entry entry : queueProps.entrySet()) { + storeConfiguredTemplates(entry.getKey(), entry.getValue()); } // Replace a queue part with a wildcard based on the wildcard level @@ -138,4 +194,28 @@ private void setTemplateConfigEntries(Configuration configuration, ++wildcardLevel; } } + + private void storeConfiguredTemplates( + String templateKey, String templateValue) { + String prefix = ""; + Map properties = templateProperties; + + if (templateKey.startsWith(AUTO_QUEUE_TEMPLATE_PREFIX)) { + prefix = AUTO_QUEUE_TEMPLATE_PREFIX; + } else if (templateKey.startsWith(AUTO_QUEUE_LEAF_TEMPLATE_PREFIX)) { + prefix = AUTO_QUEUE_LEAF_TEMPLATE_PREFIX; + properties = leafOnlyProperties; + } else if (templateKey.startsWith( + AUTO_QUEUE_PARENT_TEMPLATE_PREFIX)) { + prefix = AUTO_QUEUE_PARENT_TEMPLATE_PREFIX; + properties = parentOnlyProperties; + } + + if (!prefix.isEmpty()) { + // Trim template prefix from key + String key = templateKey.substring(prefix.length()); + // If an entry is already present, it had a higher precedence + properties.putIfAbsent(key, templateValue); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java index cc02c23fd71c1..79d11e2b96126 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java @@ -63,6 +63,8 @@ public class CapacitySchedulerInfo extends SchedulerInfo { protected String autoCreationEligibility; protected String defaultNodeLabelExpression; protected AutoQueueTemplatePropertiesInfo autoQueueTemplateProperties; + protected AutoQueueTemplatePropertiesInfo autoQueueParentTemplateProperties; + protected AutoQueueTemplatePropertiesInfo autoQueueLeafTemplateProperties; @XmlTransient static final float EPSILON = 1e-8f; @@ -108,10 +110,18 @@ public CapacitySchedulerInfo(CSQueue parent, CapacityScheduler cs) { queuePriority = parent.getPriority().getPriority(); if (parent instanceof ParentQueue) { - orderingPolicyInfo = ((ParentQueue) parent).getQueueOrderingPolicy() + ParentQueue queue = (ParentQueue) parent; + orderingPolicyInfo = queue.getQueueOrderingPolicy() .getConfigName(); autoQueueTemplateProperties = CapacitySchedulerInfoHelper - .getAutoCreatedTemplate((ParentQueue) parent); + .getAutoCreatedTemplate(queue.getAutoCreatedQueueTemplate() + .getTemplateProperties()); + autoQueueParentTemplateProperties = CapacitySchedulerInfoHelper + .getAutoCreatedTemplate(queue.getAutoCreatedQueueTemplate() + .getParentOnlyProperties()); + autoQueueLeafTemplateProperties = CapacitySchedulerInfoHelper + .getAutoCreatedTemplate(queue.getAutoCreatedQueueTemplate() + 
.getLeafOnlyProperties()); } mode = CapacitySchedulerInfoHelper.getMode(parent); queueType = CapacitySchedulerInfoHelper.getQueueType(parent); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index 152620d660335..452fa50343492 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -92,6 +92,10 @@ public class CapacitySchedulerQueueInfo { protected String defaultNodeLabelExpression; protected AutoQueueTemplatePropertiesInfo autoQueueTemplateProperties = new AutoQueueTemplatePropertiesInfo(); + protected AutoQueueTemplatePropertiesInfo autoQueueParentTemplateProperties = + new AutoQueueTemplatePropertiesInfo(); + protected AutoQueueTemplatePropertiesInfo autoQueueLeafTemplateProperties = + new AutoQueueTemplatePropertiesInfo(); CapacitySchedulerQueueInfo() { }; @@ -172,10 +176,18 @@ public class CapacitySchedulerQueueInfo { queuePriority = q.getPriority().getPriority(); if (q instanceof ParentQueue) { - orderingPolicyInfo = ((ParentQueue) q).getQueueOrderingPolicy() + ParentQueue queue = (ParentQueue) q; + orderingPolicyInfo = queue.getQueueOrderingPolicy() .getConfigName(); autoQueueTemplateProperties = CapacitySchedulerInfoHelper - .getAutoCreatedTemplate((ParentQueue) q); + .getAutoCreatedTemplate(queue.getAutoCreatedQueueTemplate() + .getTemplateProperties()); + autoQueueParentTemplateProperties = CapacitySchedulerInfoHelper + .getAutoCreatedTemplate(queue.getAutoCreatedQueueTemplate() + .getParentOnlyProperties()); + autoQueueLeafTemplateProperties = CapacitySchedulerInfoHelper + .getAutoCreatedTemplate(queue.getAutoCreatedQueueTemplate() + .getLeafOnlyProperties()); } String configuredCapacity = conf.get( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/helper/CapacitySchedulerInfoHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/helper/CapacitySchedulerInfoHelper.java index f71928b61bb3e..8b3602da31e1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/helper/CapacitySchedulerInfoHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/helper/CapacitySchedulerInfoHelper.java @@ -112,12 +112,11 @@ public static String getAutoCreationEligibility(CSQueue queue) { } public static AutoQueueTemplatePropertiesInfo getAutoCreatedTemplate( - ParentQueue parent) { + Map templateProperties) { AutoQueueTemplatePropertiesInfo propertiesInfo = new AutoQueueTemplatePropertiesInfo(); for (Map.Entry e : - 
parent.getAutoCreatedQueueTemplate().getTemplateProperties() - .entrySet()) { + templateProperties.entrySet()) { propertiesInfo.add(new ConfItem(e.getKey(), e.getValue())); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java index 2763af026a6b6..37f1378d7c641 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java @@ -22,10 +22,13 @@ import org.junit.Before; import org.junit.Test; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE; + public class TestAutoCreatedQueueTemplate { private static final String TEST_QUEUE_ABC = "root.a.b.c"; private static final String TEST_QUEUE_AB = "root.a.b"; private static final String TEST_QUEUE_A = "root.a"; + private static final String TEST_QUEUE_B = "root.b"; private static final String ROOT = "root"; private CapacitySchedulerConfiguration conf; @@ -120,8 +123,46 @@ public void testRootTemplate() { conf.getNonLabeledQueueWeight(TEST_QUEUE_A), 10e-6); } + @Test + public void testQueueSpecificTemplates() { + conf.set(getTemplateKey("root", "capacity"), "2w"); + conf.set(getLeafTemplateKey("root", + "default-node-label-expression"), "test"); + conf.set(getLeafTemplateKey("root", "capacity"), "10w"); + conf.setBoolean(getParentTemplateKey( + "root", AUTO_CREATE_CHILD_QUEUE_AUTO_REMOVAL_ENABLE), false); + + AutoCreatedQueueTemplate template = + new AutoCreatedQueueTemplate(conf, ROOT); + template.setTemplateEntriesForChild(conf, TEST_QUEUE_A); + template.setTemplateEntriesForChild(conf, TEST_QUEUE_B, true); + + Assert.assertNull("default-node-label-expression is set for parent", + conf.getDefaultNodeLabelExpression(TEST_QUEUE_A)); + Assert.assertEquals("default-node-label-expression is not set for leaf", + "test", conf.getDefaultNodeLabelExpression(TEST_QUEUE_B)); + Assert.assertFalse("auto queue removal is not disabled for parent", + conf.isAutoExpiredDeletionEnabled(TEST_QUEUE_A)); + Assert.assertEquals("weight should not be overridden when set by " + + "queue type specific template", + 10f, conf.getNonLabeledQueueWeight(TEST_QUEUE_B), 10e-6); + Assert.assertEquals("weight should be set by common template", + 2f, conf.getNonLabeledQueueWeight(TEST_QUEUE_A), 10e-6); + + } + private String getTemplateKey(String queuePath, String entryKey) { return CapacitySchedulerConfiguration.getQueuePrefix(queuePath) + AutoCreatedQueueTemplate.AUTO_QUEUE_TEMPLATE_PREFIX + entryKey; } + + private String getParentTemplateKey(String queuePath, String entryKey) { + return CapacitySchedulerConfiguration.getQueuePrefix(queuePath) + + AutoCreatedQueueTemplate.AUTO_QUEUE_PARENT_TEMPLATE_PREFIX + entryKey; + } + + private String getLeafTemplateKey(String queuePath, String entryKey) { + return CapacitySchedulerConfiguration.getQueuePrefix(queuePath) + + 
AutoCreatedQueueTemplate.AUTO_QUEUE_LEAF_TEMPLATE_PREFIX + entryKey; + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java index 459645facca17..b472e099f4b73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java @@ -369,7 +369,7 @@ private void verifyClusterScheduler(JSONObject json) throws JSONException, JSONObject info = json.getJSONObject("scheduler"); assertEquals("incorrect number of elements in: " + info, 1, info.length()); info = info.getJSONObject("schedulerInfo"); - assertEquals("incorrect number of elements in: " + info, 20, info.length()); + assertEquals("incorrect number of elements in: " + info, 22, info.length()); verifyClusterSchedulerGeneric(info.getString("type"), (float) info.getDouble("usedCapacity"), (float) info.getDouble("capacity"), @@ -424,10 +424,10 @@ private void verifyClusterSchedulerGeneric(String type, float usedCapacity, private void verifySubQueue(JSONObject info, String q, float parentAbsCapacity, float parentAbsMaxCapacity) throws JSONException, Exception { - int numExpectedElements = 35; + int numExpectedElements = 37; boolean isParentQueue = true; if (!info.has("queues")) { - numExpectedElements = 53; + numExpectedElements = 55; isParentQueue = false; } assertEquals("incorrect number of elements", numExpectedElements, info.length()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java index 2e67d69894091..36b27a232383b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java @@ -574,7 +574,7 @@ private void verifySchedulerInfoJson(JSONObject json) JSONObject info = json.getJSONObject("scheduler"); assertEquals("incorrect number of elements", 1, info.length()); info = info.getJSONObject("schedulerInfo"); - assertEquals("incorrect number of elements", 20, info.length()); + assertEquals("incorrect number of elements", 22, info.length()); JSONObject capacitiesJsonObject = info.getJSONObject(CAPACITIES); JSONArray partitionsCapsArray = capacitiesJsonObject.getJSONArray(QUEUE_CAPACITIES_BY_PARTITION); From 6a6b9998ab1e8af7326ade1268cf74a5d91b6816 Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Wed, 28 Jul 2021 17:02:15 +0200 Subject: [PATCH 527/666] YARN-6272. 
TestAMRMClient#testAMRMClientWithContainerResourceChange fails intermittently. Contributed by Andras Gyory & Prabhu Joseph --- .../yarn/client/api/impl/TestAMRMClient.java | 55 +++++++++++-------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index 24666f066deec..fff27bd008471 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -956,7 +956,6 @@ private List allocateAndStartContainers( return containers; } - private void doContainerResourceChange( final AMRMClient amClient, List containers) throws YarnException, IOException { @@ -986,38 +985,50 @@ private void doContainerResourceChange( Resource.newInstance(512, 1), null)); assertEquals(Resource.newInstance(512, 1), amClientImpl.change.get(container1.getId()).getValue().getCapability()); - // request resource increase for container2 - amClientImpl.requestContainerUpdate(container2, - UpdateContainerRequest.newInstance(container2.getVersion(), - container2.getId(), ContainerUpdateType.INCREASE_RESOURCE, - Resource.newInstance(2048, 1), null)); - assertEquals(Resource.newInstance(2048, 1), - amClientImpl.change.get(container2.getId()).getValue().getCapability()); // verify release request will cancel pending change requests for the same // container amClientImpl.requestContainerUpdate(container3, UpdateContainerRequest.newInstance(container3.getVersion(), container3.getId(), ContainerUpdateType.INCREASE_RESOURCE, Resource.newInstance(2048, 1), null)); - assertEquals(3, amClientImpl.pendingChange.size()); - amClientImpl.releaseAssignedContainer(container3.getId()); assertEquals(2, amClientImpl.pendingChange.size()); + amClientImpl.releaseAssignedContainer(container3.getId()); + assertEquals(1, amClientImpl.pendingChange.size()); // as of now: container1 asks to decrease to (512, 1) // container2 asks to increase to (2048, 1) // send allocation requests - AllocateResponse allocResponse = amClient.allocate(0.1f); - assertEquals(0, amClientImpl.change.size()); - // we should get decrease confirmation right away - List updatedContainers = - allocResponse.getUpdatedContainers(); - assertEquals(1, updatedContainers.size()); // we should get increase allocation after the next NM's heartbeat to RM - triggerSchedulingWithNMHeartBeat(); - // get allocations - allocResponse = amClient.allocate(0.1f); - updatedContainers = - allocResponse.getUpdatedContainers(); - assertEquals(1, updatedContainers.size()); + assertUpdatedContainers(amClient, container1); + // request resource increase for container2 + amClientImpl.requestContainerUpdate(container2, + UpdateContainerRequest.newInstance(container2.getVersion(), + container2.getId(), ContainerUpdateType.INCREASE_RESOURCE, + Resource.newInstance(2048, 1), null)); + assertEquals(Resource.newInstance(2048, 1), + amClientImpl.change.get(container2.getId()).getValue().getCapability()); + assertUpdatedContainers(amClient, container2); + } + + private void assertUpdatedContainers(AMRMClient amClient, Container container) { + RMContext context = yarnCluster.getResourceManager().getRMContext(); + RMNode rmNode = 
context.getRMNodes().get(container.getNodeId()); + List updateResponse = new ArrayList<>(); + int allocationAttempts = 0; + while (allocationAttempts < 1000) { + context.getScheduler().handle(new NodeUpdateSchedulerEvent(rmNode)); + try { + updateResponse = amClient.allocate(0.1f).getUpdatedContainers(); + if (updateResponse.size() == 1) { + break; + } else { + allocationAttempts++; + sleep(20); + } + } catch (Exception ignored) { + } + } + + Assert.assertEquals("Container resource change update failed", 1, updateResponse.size()); } @Test From 31543fef047e00b53f37dd1ad975bbc1d2034df4 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Wed, 28 Jul 2021 22:40:07 +0530 Subject: [PATCH 528/666] HADOOP-17814. Provide fallbacks for identity/cost providers and backoff enable (#3230) Reviewed-by: Wei-Chiu Chuang Signed-off-by: Takanobu Asanuma --- .../apache/hadoop/ipc/DecayRpcScheduler.java | 24 ++++++++ .../java/org/apache/hadoop/ipc/Server.java | 33 +++++++++- .../src/main/resources/core-default.xml | 35 +++++++++++ .../conf/TestCommonConfigurationFields.java | 3 + .../hadoop/ipc/TestDecayRpcScheduler.java | 61 +++++++++++++++++++ 5 files changed, 154 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 7f6f0c4723707..28f1a7fff1981 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -283,6 +283,18 @@ private CostProvider parseCostProvider(String ns, Configuration conf) { ns + "." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY, CostProvider.class); + if (providers.size() < 1) { + String[] nsPort = ns.split("\\."); + if (nsPort.length == 2) { + // Only if ns is split with ".", we can separate namespace and port. + // In the absence of "ipc..cost-provider.impl" property, + // we look up "ipc.cost-provider.impl" property. + providers = conf.getInstances( + nsPort[0] + "." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY, + CostProvider.class); + } + } + if (providers.size() < 1) { LOG.info("CostProvider not specified, defaulting to DefaultCostProvider"); return new DefaultCostProvider(); @@ -303,6 +315,18 @@ private IdentityProvider parseIdentityProvider(String ns, ns + "." + CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY, IdentityProvider.class); + if (providers.size() < 1) { + String[] nsPort = ns.split("\\."); + if (nsPort.length == 2) { + // Only if ns is split with ".", we can separate namespace and port. + // In the absence of "ipc..identity-provider.impl" property, + // we look up "ipc.identity-provider.impl" property. + providers = conf.getInstances( + nsPort[0] + "." 
+ CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY, + IdentityProvider.class); + } + } + if (providers.size() < 1) { LOG.info("IdentityProvider not specified, " + "defaulting to UserIdentityProvider"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 8acdc0a99b4dc..1e67203f795c1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -831,12 +831,14 @@ public synchronized void refreshCallQueue(Configuration conf) { getSchedulerClass(CommonConfigurationKeys.IPC_NAMESPACE, port, conf), getQueueClass(CommonConfigurationKeys.IPC_NAMESPACE, port, conf), maxQueueSize, prefix, conf); - callQueue.setClientBackoffEnabled(getClientBackoffEnable(prefix, conf)); + callQueue.setClientBackoffEnabled(getClientBackoffEnable( + CommonConfigurationKeys.IPC_NAMESPACE, port, conf)); } /** * Get from config if client backoff is enabled on that port. */ + @Deprecated static boolean getClientBackoffEnable( String prefix, Configuration conf) { String name = prefix + "." + @@ -845,6 +847,32 @@ static boolean getClientBackoffEnable( CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT); } + /** + * Return boolean value configured by property 'ipc..backoff.enable' + * if it is present. If the config is not present, default config + * (without port) is used to derive class i.e 'ipc.backoff.enable', + * and derived value is returned if configured. Otherwise, default value + * {@link CommonConfigurationKeys#IPC_BACKOFF_ENABLE_DEFAULT} is returned. + * + * @param namespace Namespace "ipc". + * @param port Server's listener port. + * @param conf Configuration properties. + * @return Value returned based on configuration. + */ + static boolean getClientBackoffEnable( + String namespace, int port, Configuration conf) { + String name = namespace + "." + port + "." + + CommonConfigurationKeys.IPC_BACKOFF_ENABLE; + boolean valueWithPort = conf.getBoolean(name, + CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT); + if (valueWithPort != CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT) { + return valueWithPort; + } + return conf.getBoolean(namespace + "." + + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, + CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT); + } + /** A generic call queued for handling. 
*/ public static class Call implements Schedulable, PrivilegedExceptionAction { @@ -3184,7 +3212,8 @@ protected Server(String bindAddress, int port, this.callQueue = new CallQueueManager<>( getQueueClass(CommonConfigurationKeys.IPC_NAMESPACE, port, conf), getSchedulerClass(CommonConfigurationKeys.IPC_NAMESPACE, port, conf), - getClientBackoffEnable(prefix, conf), maxQueueSize, prefix, conf); + getClientBackoffEnable(CommonConfigurationKeys.IPC_NAMESPACE, port, conf), + maxQueueSize, prefix, conf); this.secretManager = (SecretManager) secretManager; this.authorize = diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index c01c2f8f4131b..dd66da42ae2c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -2504,6 +2504,17 @@ + + ipc.backoff.enable + false + + This property is used as fallback property in case + "ipc.[port_number].backoff.enable" is not defined. + It determines whether or not to enable client backoff when + a queue is full. + + + ipc.[port_number].callqueue.impl java.util.concurrent.LinkedBlockingQueue @@ -2586,6 +2597,17 @@ + + ipc.identity-provider.impl + org.apache.hadoop.ipc.UserIdentityProvider + + This property is used as fallback property in case + "ipc.[port_number].identity-provider.impl" is not defined. + The identity provider mapping user requests to their identity. + This property applies to DecayRpcScheduler. + + + ipc.[port_number].cost-provider.impl org.apache.hadoop.ipc.DefaultCostProvider @@ -2596,6 +2618,19 @@ + + ipc.cost-provider.impl + org.apache.hadoop.ipc.DefaultCostProvider + + This property is used as fallback property in case + "ipc.[port_number].cost-provider.impl" is not defined. + The cost provider mapping user requests to their cost. To + enable determination of cost based on processing time, use + org.apache.hadoop.ipc.WeightedTimeCostProvider. + This property applies to DecayRpcScheduler. 
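Taken together, the port-less keys described above act as cluster-wide fallbacks for the port-specific variants. A minimal sketch of how a deployment might rely on them (the port number 8020 is only an example):

```
import org.apache.hadoop.conf.Configuration;

public class IpcFallbackSketch {
  public static void main(String[] args) {
    // Sketch: only the port-less fallback keys are set, so an RPC server
    // listening on, say, port 8020 picks them up because the port-specific
    // variants (ipc.8020.cost-provider.impl and friends) are absent.
    Configuration conf = new Configuration();
    conf.set("ipc.cost-provider.impl",
        "org.apache.hadoop.ipc.WeightedTimeCostProvider");
    conf.set("ipc.identity-provider.impl",
        "org.apache.hadoop.ipc.UserIdentityProvider");
    conf.setBoolean("ipc.backoff.enable", true);
    // A port-specific key, when configured, is consulted first and wins,
    // e.g. ipc.8020.cost-provider.impl would override the fallback above.
  }
}
```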
+ + + ipc.[port_number].decay-scheduler.period-ms 5000 diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 448c47c959901..9fcf4a5eb55a2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -156,6 +156,7 @@ public void initializeMemberVariables() { // FairCallQueue configs that includes dynamic ports in its keys xmlPropsToSkipCompare.add("ipc.[port_number].backoff.enable"); + xmlPropsToSkipCompare.add("ipc.backoff.enable"); xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.impl"); xmlPropsToSkipCompare.add("ipc.callqueue.impl"); xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.impl"); @@ -164,7 +165,9 @@ public void initializeMemberVariables() { xmlPropsToSkipCompare.add( "ipc.[port_number].faircallqueue.multiplexer.weights"); xmlPropsToSkipCompare.add("ipc.[port_number].identity-provider.impl"); + xmlPropsToSkipCompare.add("ipc.identity-provider.impl"); xmlPropsToSkipCompare.add("ipc.[port_number].cost-provider.impl"); + xmlPropsToSkipCompare.add("ipc.cost-provider.impl"); xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.period-ms"); xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.decay-factor"); xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.thresholds"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java index fee43b83dae7b..4ae3de1b15873 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java @@ -55,6 +55,29 @@ private Schedulable mockCall(String id) { return mockCall; } + private static class TestIdentityProvider implements IdentityProvider { + public String makeIdentity(Schedulable obj) { + UserGroupInformation ugi = obj.getUserGroupInformation(); + if (ugi == null) { + return null; + } + return ugi.getShortUserName(); + } + } + + private static class TestCostProvider implements CostProvider { + + @Override + public void init(String namespace, Configuration conf) { + // No-op + } + + @Override + public long getCost(ProcessingDetails details) { + return 1; + } + } + private DecayRpcScheduler scheduler; @Test(expected=IllegalArgumentException.class) @@ -83,6 +106,44 @@ public void testParsePeriod() { assertEquals(1058L, scheduler.getDecayPeriodMillis()); } + @Test + @SuppressWarnings("deprecation") + public void testParsePeriodWithPortLessIdentityProvider() { + // By default + scheduler = new DecayRpcScheduler(1, "ipc.50", new Configuration()); + assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT, + scheduler.getDecayPeriodMillis()); + + // Custom + Configuration conf = new Configuration(); + conf.setLong("ipc.51." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, + 1058); + conf.unset("ipc.51." + CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY); + conf.set("ipc." 
+ CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY, + "org.apache.hadoop.ipc.TestDecayRpcScheduler$TestIdentityProvider"); + scheduler = new DecayRpcScheduler(1, "ipc.51", conf); + assertEquals(1058L, scheduler.getDecayPeriodMillis()); + } + + @Test + @SuppressWarnings("deprecation") + public void testParsePeriodWithPortLessCostProvider() { + // By default + scheduler = new DecayRpcScheduler(1, "ipc.52", new Configuration()); + assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT, + scheduler.getDecayPeriodMillis()); + + // Custom + Configuration conf = new Configuration(); + conf.setLong("ipc.52." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, + 1058); + conf.unset("ipc.52." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY); + conf.set("ipc." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY, + "org.apache.hadoop.ipc.TestDecayRpcScheduler$TestCostProvider"); + scheduler = new DecayRpcScheduler(1, "ipc.52", conf); + assertEquals(1058L, scheduler.getDecayPeriodMillis()); + } + @Test @SuppressWarnings("deprecation") public void testParseFactor() { From 4d1b6fd2fcb7c494cacea81e65acbebf332a8a2d Mon Sep 17 00:00:00 2001 From: Brian Loss Date: Wed, 28 Jul 2021 15:22:58 -0400 Subject: [PATCH 529/666] HADOOP-17811: ABFS ExponentialRetryPolicy doesn't pick up configuration values (#3221) Contributed by Brian Loss. --- .../fs/azurebfs/AzureBlobFileSystemStore.java | 2 +- .../services/ExponentialRetryPolicy.java | 11 +++++ .../hadoop-azure/src/site/markdown/abfs.md | 28 +++++++++++-- .../src/site/markdown/testing_azure.md | 4 +- .../services/TestExponentialRetryPolicy.java | 40 ++++++++++++++++--- 5 files changed, 74 insertions(+), 11 deletions(-) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index a3cd1532e675b..3a527f7f0c3c9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -1524,7 +1524,7 @@ private void initializeClient(URI uri, String fileSystemName, private AbfsClientContext populateAbfsClientContext() { return new AbfsClientContextBuilder() .withExponentialRetryPolicy( - new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries())) + new ExponentialRetryPolicy(abfsConfiguration)) .withAbfsCounters(abfsCounters) .withAbfsPerfTracker(abfsPerfTracker) .build(); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java index 9a75c78aa0612..89d99471a8214 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java @@ -21,6 +21,7 @@ import java.util.Random; import java.net.HttpURLConnection; +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -89,6 +90,16 @@ public ExponentialRetryPolicy(final int maxIoRetries) { DEFAULT_CLIENT_BACKOFF); } + /** + * Initializes a new instance of the {@link ExponentialRetryPolicy} class. 
+ *
+ * @param conf The {@link AbfsConfiguration} from which to retrieve retry configuration.
+ */
+ public ExponentialRetryPolicy(AbfsConfiguration conf) {
+ this(conf.getMaxIoRetries(), conf.getMinBackoffIntervalMilliseconds(), conf.getMaxBackoffIntervalMilliseconds(),
+ conf.getBackoffIntervalMilliseconds());
+ }
+
/**
* Initializes a new instance of the {@link ExponentialRetryPolicy} class.
*
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
index e8eae8c489ea1..dfb7f3f42a5cf 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
@@ -338,7 +338,7 @@ with the following configurations.
retries. Default value is 5.
* `fs.azure.oauth.token.fetch.retry.min.backoff.interval`: Minimum back-off
interval. Added to the retry interval computed from delta backoff. By
- default this si set as 0. Set the interval in milli seconds.
+ default this is set as 0. Set the interval in milli seconds.
* `fs.azure.oauth.token.fetch.retry.max.backoff.interval`: Maximum back-off
interval. Default value is 60000 (sixty seconds). Set the interval in milli
seconds.
@@ -800,9 +800,31 @@ The following configs are related to read and write operations.
`fs.azure.io.retry.max.retries`: Sets the number of retries for IO operations.
Currently this is used only for the server call retry logic. Used within
-AbfsClient class as part of the ExponentialRetryPolicy. The value should be
+`AbfsClient` class as part of the ExponentialRetryPolicy. The value should be
greater than or equal to 0.
+`fs.azure.io.retry.min.backoff.interval`: Sets the minimum backoff interval for
+retries of IO operations. Currently this is used only for the server call retry
+logic. Used within `AbfsClient` class as part of the ExponentialRetryPolicy. This
+value indicates the smallest interval (in milliseconds) to wait before retrying
+an IO operation. The default value is 3000 (3 seconds).
+
+`fs.azure.io.retry.max.backoff.interval`: Sets the maximum backoff interval for
+retries of IO operations. Currently this is used only for the server call retry
+logic. Used within `AbfsClient` class as part of the ExponentialRetryPolicy. This
+value indicates the largest interval (in milliseconds) to wait before retrying
+an IO operation. The default value is 30000 (30 seconds).
+
+`fs.azure.io.retry.backoff.interval`: Sets the default backoff interval for
+retries of IO operations. Currently this is used only for the server call retry
+logic. Used within `AbfsClient` class as part of the ExponentialRetryPolicy. This
+value is used to compute a random delta between 80% and 120% of the specified
+value. This random delta is then multiplied by an exponent of the current IO
+retry number (i.e., the default is multiplied by `2^(retryNum - 1)`) and then
+constrained within the range of [`fs.azure.io.retry.min.backoff.interval`,
+`fs.azure.io.retry.max.backoff.interval`] to determine the amount of time to
+wait before the next IO retry attempt. The default value is 3000 (3 seconds).
+
`fs.azure.write.request.size`: To set the write buffer size. Specify the value
in bytes. The value should be between 16384 to 104857600 both inclusive (16 KB
to 100 MB). The default value will be 8388608 (8 MB).
@@ -859,7 +881,7 @@ when there are too many writes from the same process.
### Security Options
`fs.azure.always.use.https`: Enforces to use HTTPS instead of HTTP when the flag
-is made true.
Irrespective of the flag, AbfsClient will use HTTPS if the secure +is made true. Irrespective of the flag, `AbfsClient` will use HTTPS if the secure scheme (ABFSS) is used or OAuth is used for authentication. By default this will be set to true. diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md index cf3b2344456af..933f86be3e896 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md @@ -448,7 +448,7 @@ use requires the presence of secret credentials, where tests may be slow, and where finding out why something failed from nothing but the test output is critical. -#### Subclasses Existing Shared Base Blasses +#### Subclasses Existing Shared Base Classes There are a set of base classes which should be extended for Azure tests and integration tests. @@ -602,7 +602,7 @@ various test combinations, it will: 2. Run tests for all combinations 3. Summarize results across all the test combination runs. -As a pre-requiste step, fill config values for test accounts and credentials +As a pre-requisite step, fill config values for test accounts and credentials needed for authentication in `src/test/resources/azure-auth-keys.xml.template` and rename as `src/test/resources/azure-auth-keys.xml`. diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java index e10419f148b25..0f8dc55aa14a4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java @@ -18,6 +18,11 @@ package org.apache.hadoop.fs.azurebfs.services; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_BACKOFF_INTERVAL; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_MAX_BACKOFF_INTERVAL; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_MAX_IO_RETRIES; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_MIN_BACKOFF_INTERVAL; + import java.util.Random; import org.junit.Assert; @@ -32,7 +37,6 @@ * Unit test TestExponentialRetryPolicy. 
*/ public class TestExponentialRetryPolicy extends AbstractAbfsIntegrationTest { - private final int maxRetryCount = 30; private final int noRetryCount = 0; private final int retryCount = new Random().nextInt(maxRetryCount); @@ -57,12 +61,38 @@ public void testDifferentMaxIORetryCount() throws Exception { @Test public void testDefaultMaxIORetryCount() throws Exception { AbfsConfiguration abfsConfig = getAbfsConfig(); - Assert.assertTrue( + Assert.assertEquals( String.format("default maxIORetry count is %s.", maxRetryCount), - abfsConfig.getMaxIoRetries() == maxRetryCount); + maxRetryCount, abfsConfig.getMaxIoRetries()); testMaxIOConfig(abfsConfig); } + @Test + public void testAbfsConfigConstructor() throws Exception { + // Ensure we choose expected values that are not defaults + ExponentialRetryPolicy template = new ExponentialRetryPolicy( + getAbfsConfig().getMaxIoRetries()); + int testModifier = 1; + int expectedMaxRetries = template.getRetryCount() + testModifier; + int expectedMinBackoff = template.getMinBackoff() + testModifier; + int expectedMaxBackoff = template.getMaxBackoff() + testModifier; + int expectedDeltaBackoff = template.getDeltaBackoff() + testModifier; + + Configuration config = new Configuration(this.getRawConfiguration()); + config.setInt(AZURE_MAX_IO_RETRIES, expectedMaxRetries); + config.setInt(AZURE_MIN_BACKOFF_INTERVAL, expectedMinBackoff); + config.setInt(AZURE_MAX_BACKOFF_INTERVAL, expectedMaxBackoff); + config.setInt(AZURE_BACKOFF_INTERVAL, expectedDeltaBackoff); + + ExponentialRetryPolicy policy = new ExponentialRetryPolicy( + new AbfsConfiguration(config, "dummyAccountName")); + + Assert.assertEquals("Max retry count was not set as expected.", expectedMaxRetries, policy.getRetryCount()); + Assert.assertEquals("Min backoff interval was not set as expected.", expectedMinBackoff, policy.getMinBackoff()); + Assert.assertEquals("Max backoff interval was not set as expected.", expectedMaxBackoff, policy.getMaxBackoff()); + Assert.assertEquals("Delta backoff interval was not set as expected.", expectedDeltaBackoff, policy.getDeltaBackoff()); + } + private AbfsConfiguration getAbfsConfig() throws Exception { Configuration config = new Configuration(this.getRawConfiguration()); @@ -81,8 +111,8 @@ private void testMaxIOConfig(AbfsConfiguration abfsConfig) { localRetryCount++; } - Assert.assertTrue( + Assert.assertEquals( "When all retries are exhausted, the retryCount will be same as max configured", - localRetryCount == abfsConfig.getMaxIoRetries()); + abfsConfig.getMaxIoRetries(), localRetryCount); } } From e17bd05e3dad2315fc99f377576cf9ff91841575 Mon Sep 17 00:00:00 2001 From: hchaverr Date: Tue, 27 Jul 2021 13:26:34 -0700 Subject: [PATCH 530/666] HADOOP-17819. Add extensions to ProtobufRpcEngine RequestHeaderProto. Contributed by Hector Sandoval Chaverri. 
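As an aside on the HADOOP-17811 documentation above: the backoff rule described for
`fs.azure.io.retry.backoff.interval` can be sketched as follows. The class and method
names (`RetryBackoffSketch`, `retryIntervalMs`) are hypothetical; this is only an
illustration of the documented behaviour, not the actual `ExponentialRetryPolicy` code.

```java
import java.util.Random;

/**
 * Sketch only: mirrors the backoff rule documented in abfs.md above.
 * Names and structure are hypothetical, not the ABFS implementation.
 */
public final class RetryBackoffSketch {
  private static final Random RANDOM = new Random();

  static long retryIntervalMs(int retryNum, long minBackoffMs,
      long maxBackoffMs, long deltaBackoffMs) {
    // random delta between 80% and 120% of fs.azure.io.retry.backoff.interval
    double randomDelta = deltaBackoffMs * (0.8 + 0.4 * RANDOM.nextDouble());
    // scale by 2^(retryNum - 1), i.e. grow exponentially with each retry
    double scaled = randomDelta * Math.pow(2, retryNum - 1);
    // constrain to [min.backoff.interval, max.backoff.interval]
    return (long) Math.max(minBackoffMs, Math.min(maxBackoffMs, scaled));
  }
}
```

Under this rule, with the documented defaults (3000 ms delta, 3000 ms minimum,
30000 ms maximum), a third retry would wait roughly 9600-14400 ms.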
(#3242) --- .../hadoop-common/src/main/proto/ProtobufRpcEngine.proto | 3 +++ .../hadoop-common/src/main/proto/ProtobufRpcEngine2.proto | 3 +++ 2 files changed, 6 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto index fa11313402758..f72cf1a8da16e 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto @@ -64,4 +64,7 @@ message RequestHeaderProto { /** protocol version of class declaring the called method */ required uint64 clientProtocolVersion = 3; + + /** protocol extensions */ + extensions 1000 to max; } diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto index 16ee880e7b720..c3023ec26dfd4 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto @@ -64,4 +64,7 @@ message RequestHeaderProto { /** protocol version of class declaring the called method */ required uint64 clientProtocolVersion = 3; + + /** protocol extensions */ + extensions 1000 to max; } From 809ff97ffbc6cb305a069731aacd7192d6abe485 Mon Sep 17 00:00:00 2001 From: He Xiaoqiao Date: Thu, 29 Jul 2021 11:18:39 +0800 Subject: [PATCH 531/666] HDFS-15175. Multiple CloseOp shared block instance causes the standby namenode to crash when rolling editlog. Contributed by Wan Chang. Reviewed-by: He Xiaoqiao Reviewed-by: Stephen O'Donnell --- .../hdfs/server/namenode/FSEditLogOp.java | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 674676546f1b0..3aeb475408a09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -412,6 +412,17 @@ private static List readXAttrsFromEditLog(DataInputStream in, return PBHelperClient.convertXAttrs(proto.getXAttrsList()); } + private static Block[] deepCopy(Block[] blocks) { + if (blocks == null || blocks.length == 0) { + return blocks; + } + Block[] copy = new Block[blocks.length]; + for (int i = 0; i < blocks.length; ++i) { + copy[i] = blocks[i] == null ? null : new Block(blocks[i]); + } + return copy; + } + @SuppressWarnings("unchecked") static abstract class AddCloseOp extends FSEditLogOp @@ -500,7 +511,7 @@ T setBlocks(Block[] blocks) { throw new RuntimeException("Can't have more than " + MAX_BLOCKS + " in an AddCloseOp."); } - this.blocks = blocks; + this.blocks = FSEditLogOp.deepCopy(blocks); return (T)this; } @@ -978,7 +989,7 @@ public String getPath() { } AddBlockOp setPenultimateBlock(Block pBlock) { - this.penultimateBlock = pBlock; + this.penultimateBlock = pBlock == null ? null : new Block(pBlock); return this; } @@ -987,7 +998,7 @@ Block getPenultimateBlock() { } AddBlockOp setLastBlock(Block lastBlock) { - this.lastBlock = lastBlock; + this.lastBlock = lastBlock == null ? 
null : new Block(lastBlock); return this; } @@ -1090,7 +1101,7 @@ public String getPath() { } UpdateBlocksOp setBlocks(Block[] blocks) { - this.blocks = blocks; + this.blocks = FSEditLogOp.deepCopy(blocks); return this; } @@ -2881,7 +2892,8 @@ TruncateOp setTimestamp(long timestamp) { } TruncateOp setTruncateBlock(Block truncateBlock) { - this.truncateBlock = truncateBlock; + this.truncateBlock = truncateBlock == null ? + null : new Block(truncateBlock); return this; } From 20c1e20026b4b02c970095e51c535842e79548b9 Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Thu, 29 Jul 2021 12:57:28 +0800 Subject: [PATCH 532/666] HDFS-15936.Solve SocketTimeoutException#sendPacket() does not record SocketTimeout exception. (#2836) Co-authored-by: zhujianghua Reviewed-by: cxorm Reviewed-by: Viraj Jasani --- .../java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index eb60ab2d778e7..0225554c696d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -632,6 +632,7 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, * * Reporting of this case is done in DataXceiver#run */ + LOG.warn("Sending packets timed out.", e); } else { /* Exception while writing to the client. Connection closure from * the other end is mostly the case and we do not care much about From c3e80399f8478235aa16556ae637a51187ad371d Mon Sep 17 00:00:00 2001 From: minni31 Date: Thu, 29 Jul 2021 14:55:39 +0530 Subject: [PATCH 533/666] YARN-10841. Fix token reset synchronization for UAM response token. (#3194) YARN-10841. Fix token reset synchronization for UAM response token. 
Contributed by Minni Mittal --- .../amrmproxy/FederationInterceptor.java | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java index e95594ca46cfc..c32afee7e9d6f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java @@ -1413,8 +1413,8 @@ protected void mergeAllocateResponse(AllocateResponse homeResponse, if (otherRMAddress.equals(this.homeSubClusterId)) { homeResponse.setAMRMToken(otherResponse.getAMRMToken()); } else { - throw new YarnRuntimeException( - "amrmToken from UAM " + otherRMAddress + " should be null here"); + LOG.warn("amrmToken from UAM {} not null, it should be null here", + otherRMAddress); } } @@ -1691,6 +1691,8 @@ private class HeartbeatCallBack implements AsyncCallback { @Override public void callback(AllocateResponse response) { + org.apache.hadoop.yarn.api.records.Token amrmToken = + response.getAMRMToken(); synchronized (asyncResponseSink) { List responses = null; if (asyncResponseSink.containsKey(subClusterId)) { @@ -1700,6 +1702,11 @@ public void callback(AllocateResponse response) { asyncResponseSink.put(subClusterId, responses); } responses.add(response); + + if (this.isUAM) { + // Do not further propagate the new amrmToken for UAM + response.setAMRMToken(null); + } // Notify main thread about the response arrival asyncResponseSink.notifyAll(); } @@ -1716,9 +1723,9 @@ public void callback(AllocateResponse response) { // Save the new AMRMToken for the UAM if present // Do this last because it can be slow... - if (this.isUAM && response.getAMRMToken() != null) { + if (this.isUAM && amrmToken != null) { Token newToken = ConverterUtils - .convertFromYarn(response.getAMRMToken(), (Text) null); + .convertFromYarn(amrmToken, (Text) null); // Do not further propagate the new amrmToken for UAM response.setAMRMToken(null); From 8095b97f9f49b83f6d1bbe4cfb7639d019378f5c Mon Sep 17 00:00:00 2001 From: Szilard Nemeth Date: Thu, 29 Jul 2021 13:43:40 +0200 Subject: [PATCH 534/666] YARN-10628. Add node usage metrics in SLS. 
Contributed by Vadaga Ananyo Rao --- .../src/main/html/showSimulationTrace.html | 21 ++++++ .../src/main/html/simulate.html.template | 28 +++++++- .../yarn/sls/scheduler/SchedulerMetrics.java | 47 +++++++++++++ .../yarn/sls/utils/NodeUsageRanges.java | 68 +++++++++++++++++++ .../hadoop/yarn/sls/utils/package-info.java | 21 ++++++ .../apache/hadoop/yarn/sls/web/SLSWebApp.java | 33 +++++++++ 6 files changed, 215 insertions(+), 3 deletions(-) create mode 100644 hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/NodeUsageRanges.java create mode 100644 hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/package-info.java diff --git a/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html b/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html index 102fdcf35ee7e..e5dbc03750592 100644 --- a/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html +++ b/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html @@ -100,6 +100,10 @@

    Total capacity{TotalCapacity|fmt_bytes}
    Used capacity{UsedCapacity|fmt_bytes}
    Remaining capacity{RemainingCapacity|fmt_bytes}
    Total capacity{TotalCapacityBigInt|fmt_bytes}
    Used capacity{UsedCapacityBigInt|fmt_bytes}
    Remaining capacity{RemainingCapacityBigInt|fmt_bytes}
    Nameservices{NumNameservices}
    Namenodes{NumNamenodes}