Commit c760eec
Add permission checks before reading from HDFS stream (#26716)
Add checks for special permissions before reading HDFS stream data. Also adds a test for the read-only repository fix: MiniHDFS now starts with an existing repository containing a single snapshot, and the tests create a read-only repository and attempt to list the snapshots within it.
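For orientation, the core pattern this change applies is Java's privileged-block API: each lazy operation on the wrapped HDFS stream runs inside AccessController.doPrivileged, limited to an explicit permission set rather than the plugin's full grants. A minimal self-contained sketch of that pattern (class and field names here are illustrative, not the commit's exact code):

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.AccessController;
import java.security.Permission;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

// Minimal sketch of the wrapper pattern: every read is elevated inside a
// privileged block, but only to the given restricted permission set.
// Illustrative names; the commit's real class is HDFSPrivilegedInputSteam.
class PrivilegedInputStreamSketch extends FilterInputStream {
    private final Permission[] restrictedPermissions;

    PrivilegedInputStreamSketch(InputStream in, Permission[] restrictedPermissions) {
        super(in);
        this.restrictedPermissions = restrictedPermissions;
    }

    @Override
    public int read() throws IOException {
        // The underlying HDFS stream may open a connection lazily here,
        // so the call itself needs the elevated (but restricted) privileges.
        return doPrivilegedOrThrow(() -> in.read());
    }

    private <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
        // The real code also calls SpecialPermission.check() first, which is
        // Elasticsearch-specific and omitted from this sketch.
        try {
            return AccessController.doPrivileged(action, null, restrictedPermissions);
        } catch (PrivilegedActionException e) {
            throw (IOException) e.getCause();
        }
    }
}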
1 parent fda8f8b commit c760eec

8 files changed: +111 −16 lines changed
plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java

Lines changed: 14 additions & 5 deletions

@@ -23,6 +23,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Path;
+import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -45,12 +46,14 @@
 
 final class HdfsBlobContainer extends AbstractBlobContainer {
     private final HdfsBlobStore store;
+    private final HdfsSecurityContext securityContext;
     private final Path path;
     private final int bufferSize;
 
-    HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize) {
+    HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize, HdfsSecurityContext hdfsSecurityContext) {
         super(blobPath);
         this.store = store;
+        this.securityContext = hdfsSecurityContext;
         this.path = path;
         this.bufferSize = bufferSize;
     }
@@ -90,7 +93,9 @@ public InputStream readBlob(String blobName) throws IOException {
         // FSDataInputStream can open connections on read() or skip() so we wrap in
         // HDFSPrivilegedInputSteam which will ensure that underlying methods will
         // be called with the proper privileges.
-        return store.execute(fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize)));
+        return store.execute(fileContext ->
+            new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext)
+        );
     }
 
     @Override
@@ -144,8 +149,11 @@ public Map<String, BlobMetaData> listBlobs() throws IOException {
      */
     private static class HDFSPrivilegedInputSteam extends FilterInputStream {
 
-        HDFSPrivilegedInputSteam(InputStream in) {
+        private final HdfsSecurityContext securityContext;
+
+        HDFSPrivilegedInputSteam(InputStream in, HdfsSecurityContext hdfsSecurityContext) {
             super(in);
+            this.securityContext = hdfsSecurityContext;
         }
 
         public int read() throws IOException {
@@ -175,9 +183,10 @@ public synchronized void reset() throws IOException {
             });
         }
 
-        private static <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+        private <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+            SpecialPermission.check();
             try {
-                return AccessController.doPrivileged(action);
+                return AccessController.doPrivileged(action, null, securityContext.getRestrictedExecutionPermissions());
             } catch (PrivilegedActionException e) {
                 throw (IOException) e.getCause();
            }
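The remaining HDFSPrivilegedInputSteam overrides fall outside the hunks shown; a hedged reconstruction of their shape, consistent with the read() and reset() signatures visible above:

// Hedged reconstruction (the exact bodies are outside this diff): each stream
// method that can touch the network delegates through doPrivilegedOrThrow,
// which now also performs the SpecialPermission check.
public int read(byte[] b, int off, int len) throws IOException {
    return doPrivilegedOrThrow(() -> in.read(b, off, len));
}

public long skip(long n) throws IOException {
    return doPrivilegedOrThrow(() -> in.skip(n));
}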

plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java

Lines changed: 1 addition & 1 deletion

@@ -75,7 +75,7 @@ public String toString() {
 
     @Override
     public BlobContainer blobContainer(BlobPath path) {
-        return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize);
+        return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize, securityContext);
     }
 
     private Path buildHdfsPath(BlobPath blobPath) {
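This one-line change is the plumbing that threads the store's HdfsSecurityContext into every container it creates, so the restricted privileged reads above have a permission set to work with.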

plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java

Lines changed: 1 addition & 1 deletion

@@ -132,7 +132,7 @@ private FileContext createContext(URI uri, Settings repositorySettings) {
         hadoopConfiguration.setBoolean("fs.hdfs.impl.disable.cache", true);
 
         // Create the filecontext with our user information
-        // This will correctly configure the filecontext to have our UGI as it's internal user.
+        // This will correctly configure the filecontext to have our UGI as its internal user.
         return ugi.doAs((PrivilegedAction<FileContext>) () -> {
             try {
                 AbstractFileSystem fs = AbstractFileSystem.get(uri, hadoopConfiguration);
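For context on the comment fixed above: createContext builds the FileContext under ugi.doAs so that Hadoop records that UGI as the context's internal user. A hedged sketch of the pattern, assuming a remote-user UGI (how the repository actually obtains its UGI is not shown in this hunk):

import java.io.UncheckedIOException;
import java.net.URI;
import java.security.PrivilegedAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.UserGroupInformation;

// Sketch: construct the FileContext as the configured Hadoop user so the
// resulting context carries that UGI. createRemoteUser is an assumption here;
// the real repository may obtain its UGI differently (e.g. via Kerberos).
static FileContext createContextSketch(URI uri, Configuration hadoopConfiguration) {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("elasticsearch");
    return ugi.doAs((PrivilegedAction<FileContext>) () -> {
        try {
            AbstractFileSystem fs = AbstractFileSystem.get(uri, hadoopConfiguration);
            return FileContext.getFileContext(fs, hadoopConfiguration);
        } catch (UnsupportedFileSystemException e) {
            throw new UncheckedIOException(e);
        }
    });
}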

plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java

Lines changed: 3 additions & 1 deletion

@@ -56,7 +56,9 @@ class HdfsSecurityContext {
         // 1) hadoop dynamic proxy is messy with access rules
         new ReflectPermission("suppressAccessChecks"),
         // 2) allow hadoop to add credentials to our Subject
-        new AuthPermission("modifyPrivateCredentials")
+        new AuthPermission("modifyPrivateCredentials"),
+        // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client
+        new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read")
     };
 
     // If Security is enabled, we need all the following elevated permissions:
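For what the new permission actually grants: a PrivateCredentialPermission target names a credential class plus a principal class/name pair, and the only valid action is "read". A small standalone sketch decoding the target used above:

import javax.security.auth.PrivateCredentialPermission;

public class PrivateCredentialPermissionDemo {
    public static void main(String[] args) {
        // The target string reads: grant "read" access to private credentials
        // of type org.apache.hadoop.security.Credentials held by a Subject
        // with any principal class ("*") and any principal name ("*").
        PrivateCredentialPermission p = new PrivateCredentialPermission(
            "org.apache.hadoop.security.Credentials * \"*\"", "read");
        System.out.println(p.getCredentialClass()); // org.apache.hadoop.security.Credentials
        System.out.println(p.getActions());         // read
    }
}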
Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot - readonly":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository_ro
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9999"
+            path: "/user/elasticsearch/existing/readonly-repository"
+            readonly: true
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_repository_ro
+        snapshot: "_all"
+
+  - length: { snapshots: 1 }
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: test_snapshot_repository_ro
Lines changed: 31 additions & 0 deletions

@@ -0,0 +1,31 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot - readonly":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository_ro
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9998"
+            path: "/user/elasticsearch/existing/readonly-repository"
+            security:
+              principal: "[email protected]"
+            readonly: true
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_repository_ro
+        snapshot: "_all"
+
+  - length: { snapshots: 1 }
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: test_snapshot_repository_ro
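The two YAML tests above are otherwise identical; the second targets the secured HDFS fixture (port 9998 rather than 9999) and so must also supply a security principal in the repository settings.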

test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java

Lines changed: 32 additions & 8 deletions

@@ -19,7 +19,9 @@
 
 package hdfs;
 
+import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -29,9 +31,11 @@
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -100,15 +104,35 @@ public static void main(String[] args) throws Exception {
         }
         MiniDFSCluster dfs = builder.build();
 
-        // Set the elasticsearch user directory up
-        if (UserGroupInformation.isSecurityEnabled()) {
-            FileSystem fs = dfs.getFileSystem();
-            org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+        // Configure contents of the filesystem
+        org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+        try (FileSystem fs = dfs.getFileSystem()) {
+
+            // Set the elasticsearch user directory up
             fs.mkdirs(esUserPath);
-            List<AclEntry> acls = new ArrayList<>();
-            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
-            fs.modifyAclEntries(esUserPath, acls);
-            fs.close();
+            if (UserGroupInformation.isSecurityEnabled()) {
+                List<AclEntry> acls = new ArrayList<>();
+                acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
+                fs.modifyAclEntries(esUserPath, acls);
+            }
+
+            // Install a pre-existing repository into HDFS
+            String directoryName = "readonly-repository";
+            String archiveName = directoryName + ".tar.gz";
+            URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
+            if (readOnlyRepositoryArchiveURL != null) {
+                Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
+                File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
+                FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
+                FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());
+
+                fs.copyFromLocalFile(true, true,
+                    new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
+                    esUserPath.suffix("/existing/" + directoryName)
+                );
+
+                FileUtils.deleteDirectory(tempDirectory.toFile());
+            }
         }
 
         // write our PID file
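Two design points worth noting in this fixture change: the explicit fs.close() is replaced by try-with-resources, so the FileSystem handle is closed even if installing the fixture data fails, and the read-only repository is installed only when the readonly-repository.tar.gz archive is actually present on the fixture's classpath, keeping the fixture usable without it.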
Binary file not shown (presumably the readonly-repository.tar.gz fixture archive that MiniHDFS loads from its classpath).
