
Commit dcee0a3

Merge branch 'master' into hadoop-2.1.0
2 parents: 0081a6f + 1ee6893

File tree: 5 files changed, +57 -12 lines


pom.xml

Lines changed: 2 additions & 2 deletions
@@ -1,12 +1,12 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop.fs.glusterfs</groupId>
+  <groupId>org.gluster</groupId>
   <artifactId>glusterfs-hadoop</artifactId>
   <packaging>jar</packaging>
   <!-- from now, we will update version manually and ignore maven snapshot
   features -->
-  <version>2.3.2</version>
+  <version>2.3.8</version>
   <name>glusterfs-hadoop</name>
   <url>http://maven.apache.org</url>

src/main/java/org/apache/hadoop/fs/glusterfs/GlusterFileSystem.java

Lines changed: 1 addition & 0 deletions
@@ -72,6 +72,7 @@ public boolean exists(Path f) throws IOException{

     public void setConf(Configuration conf){
         log.info("Configuring GlusterFS");
+        if(conf!=null) conf.addResource("glusterfs-site.xml");
         super.setConf(conf);
     }


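Note on the setConf() change: it overlays GlusterFS-specific keys from a glusterfs-site.xml found on the classpath every time the FileSystem is configured, instead of requiring callers to add the resource themselves. A minimal standalone sketch of the same Configuration.addResource pattern (the key name below is hypothetical, for illustration only):

import org.apache.hadoop.conf.Configuration;

public class GlusterConfSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Look up glusterfs-site.xml on the classpath and overlay its
        // <property> entries onto the configuration. If the resource is
        // absent, its keys simply never appear (exact behaviour can vary
        // across Hadoop versions).
        conf.addResource("glusterfs-site.xml");
        // Hypothetical key, used only to show the lookup:
        System.out.println(conf.get("fs.glusterfs.volumes", "<unset>"));
    }
}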
src/main/java/org/apache/hadoop/fs/glusterfs/GlusterVolume.java

Lines changed: 19 additions & 7 deletions
@@ -21,6 +21,7 @@

 package org.apache.hadoop.fs.glusterfs;

+
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -197,8 +198,12 @@ public File pathToFile(Path path) {
         }else if(volume==null){
             volume = default_volume;
         }
-
-        return new File(this.volumes.get(volume) + "/" + path.toUri().getPath());
+        String volPath = this.volumes.get(volume);
+        if(volPath==null){
+            throw new RuntimeException("Error undefined volume:" + volume + " in path: " + path);
+        }
+
+        return new File(volPath + "/" + path.toUri().getPath());
     }

     protected Path getInitialWorkingDirectory() {
@@ -208,7 +213,8 @@ protected Path getInitialWorkingDirectory() {

     public Path fileToPath(File path) {
         Enumeration<String> all = volumes.keys();
-        String rawPath = path.toURI().getRawPath();
+        String rawPath = path.getAbsolutePath();
+
         String volume = null;
         String root = null;

@@ -220,11 +226,14 @@ public Path fileToPath(File path) {
                 root = nextPath;
             }
         }
+
+        if(volume==null){
+            throw new RuntimeException("No volume matching path: " + path);
+        }

         if(default_volume.equalsIgnoreCase(volume))
             volume = "";
-
-        return new Path("glusterfs://" + volume + "/" + rawPath.substring(root.length()));
+        return new Path("glusterfs://" + volume + "/" + rawPath.substring(root.length()));
     }

     public boolean rename(Path src, Path dst) throws IOException {
@@ -280,6 +289,10 @@ public FileStatus[] listStatus(Path f) throws IOException {
             return new FileStatus[] {
                 new GlusterFileStatus(localf, getDefaultBlockSize(), this) };
         }
+
+        if(localf.isDirectory() && !localf.canRead()){
+            throw new IOException("Access denied : " + localf.getPath());
+        }

         File[] names = localf.listFiles();
         if (names == null) {
@@ -292,8 +305,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
                 results[j] = getFileStatus(fileToPath(names[i]));
                 j++;
             } catch (FileNotFoundException e) {
-                // ignore the files not found since the dir list may have have changed
-                // since the names[] list was generated.
+                log.info("ignoring invisible path : " + names[i]);
             }
         }
         if (j == names.length) {

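Two things are worth noting in this file. First, pathToFile() and fileToPath() now fail fast with a RuntimeException when a volume cannot be resolved, where the old code silently built paths like "null/..." from a failed lookup. Second, fileToPath() swaps File.toURI().getRawPath() for File.getAbsolutePath(): getRawPath() percent-encodes the file name, so a name that already contains '%' gets mangled on the round trip, while getAbsolutePath() returns it verbatim (this is exactly what the encoded-path test added below exercises). A small standalone demonstration of the difference, assuming a Unix-style path:

import java.io.File;

public class RawPathSketch {
    public static void main(String[] args) {
        File f = new File("/tmp/a%2a");
        // toURI() escapes '%' as "%25", so the raw path no longer matches the name on disk:
        System.out.println(f.toURI().getRawPath()); // prints /tmp/a%252a
        // getAbsolutePath() preserves the name exactly as-is:
        System.out.println(f.getAbsolutePath());    // prints /tmp/a%2a
    }
}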
src/main/java/org/apache/hadoop/fs/glusterfs/Version.java

Lines changed: 11 additions & 2 deletions
@@ -4,23 +4,32 @@

 import java.util.Properties;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Versioning stuff for the shim. This class is not tested since there is no
  * deterministic behaviour (i.e. it might not work if not building from binary),
  * and the effects are pure side effects.
  */
 public class Version extends Properties{
+    static final Logger LOG = LoggerFactory.getLogger(Version.class);
     public Version() {
         super();
         try{
             load(this.getClass().getClassLoader().getResourceAsStream("git.properties"));
         }
         catch(Throwable t){
-            throw new RuntimeException("Couldn't find git properties for version info " + t.getMessage());
+            LOG.warn("Couldn't find GIT properties for version info " +
+                t.getMessage()+". This jar may have been built OUTSIDE a GIT repo.");
         }
     }
     public String getTag(){
-        return this.getProperty("git.commit.id.describe").split("-")[0];
+        String commit = this.getProperty("git.commit.id.describe");
+        String tag = commit != null ?
+            commit.split("-")[0] :
+            "no version info available. check log warnings.";
+        return tag;
     }

     /**
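With this change a jar built outside a git checkout (git.properties is typically generated at build time by a git-commit-id Maven plugin) logs a warning instead of failing construction, and getTag() degrades to a placeholder string. A minimal usage sketch, assuming Version is on the classpath:

public class VersionSketch {
    public static void main(String[] args) {
        Version v = new Version();
        // Prints e.g. "v2.3.8" when git.properties contains
        // git.commit.id.describe=v2.3.8-12-gabc123 (hypothetical value),
        // otherwise the "no version info available" fallback.
        System.out.println(v.getTag());
    }
}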

src/test/java/org/apache/hadoop/fs/test/unit/HcfsFileSystemTest.java

Lines changed: 24 additions & 1 deletion
@@ -31,12 +31,16 @@
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;
+import java.util.Iterator;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.test.connector.HcfsTestConnectorFactory;
@@ -45,7 +49,6 @@
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;

 /**
@@ -56,6 +59,26 @@ public class HcfsFileSystemTest{

     static FileSystem fs ;

+
+    /**
+     * See MAPREDUCE-5902 for context on why this test is critical
+     * for ecosystem interoperability.
+     */
+    @org.junit.Test
+    public void testEncodedPaths() throws Exception {
+        //FileSystem fs2 = FileSystem.getLocal(new Configuration());
+        FileSystem fs2 = fs;
+        Path encodedFiles=new Path("/tmp/encodedTest"+System.currentTimeMillis());
+        fs2.mkdirs(encodedFiles);
+        fs2.create(new Path(encodedFiles,"a"));
+        fs2.create(new Path(encodedFiles,"a%2"));
+        fs2.create(new Path(encodedFiles,"a%2a"));
+        fs2.create(new Path(encodedFiles,"a%3a"));
+        fs2.create(new Path(encodedFiles,"a%4a"));
+        Assert.assertEquals(5, fs2.listStatus(encodedFiles).length);
+        fs2.delete(encodedFiles);
+    }
+
     @BeforeClass
     public static void setup() throws Exception {
         HcfsTestConnectorInterface connector = HcfsTestConnectorFactory.getHcfsTestConnector();
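The test writes names containing raw '%' sequences and asserts that all five survive a listStatus() round trip, which only holds if fileToPath() stops re-encoding names (see the GlusterVolume change above). The new Iterator/RemoteIterator/LocatedFileStatus imports suggest iterator-based listing as well; a minimal sketch of walking a directory that way (assumes a live FileSystem and an existing directory, not part of this commit):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListingSketch {
    // Entries with percent signs, e.g. "a%2a", should come back verbatim
    // when the underlying FileSystem preserves raw names correctly.
    static void printEntries(FileSystem fs, Path dir) throws IOException {
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, false);
        while (it.hasNext()) {
            LocatedFileStatus st = it.next();
            System.out.println(st.getPath() + "\t" + st.getLen());
        }
    }
}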
