Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 11 additions & 3 deletions pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,21 @@
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.9</version>
<scope>test</scope>
<!-- <scope>test</scope> -->
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.0.5-alpha</version>
<version>2.2.0</version>
</dependency>
<dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.2.0</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<!--<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common-test</artifactId>
<version>0.22.0</version>
Expand All @@ -31,6 +38,7 @@
<version>1.0.0</version>
<scope>test</scope>
</dependency>
-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -119,5 +119,9 @@ public void write(DataOutput out) throws IOException{
}
super.write(out);
}

public String toDebugString(){
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be removed.

return "path ="+this.getPath() + " group="+this.getGroup() + " owner=" + this.getOwner() + " modtime=" + this.getModificationTime() + " permission=" + this.getPermission().toShort();
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -33,25 +33,31 @@
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GlusterFileSystem extends FilterFileSystem{

protected static final Logger log=LoggerFactory.getLogger(GlusterFileSystem.class);

/**
 * Creates a GlusterFileSystem wrapping a fresh {@link GlusterVolume}.
 * The underlying volume runs with CRC checking disabled (per the log
 * message below — confirm against GlusterVolume's configuration).
 */
public GlusterFileSystem(){
    super(new GlusterVolume());
    Version v = new Version();
    log.info("Initializing GlusterFS, CRC disabled.");
    // SLF4J parameterized logging: defers string construction until the
    // INFO level is known to be enabled.
    log.info("GIT INFO={}", v);
    log.info("GIT_TAG={}", v.getTag());
}


/**
 * Returns the URI scheme of this file system by delegating to the
 * wrapped raw file system.
 *
 * NOTE(review): the delegation is deliberate — per the PR discussion,
 * some FilterFileSystem versions throw for getScheme(), and
 * testMkdirsWithUMask fails without it; confirm against the targeted
 * Hadoop release.
 */
public String getScheme() {
    FileSystem raw = super.getRawFileSystem();
    return raw.getScheme();
}

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same comment as GlusterVolume - where is getScheme() part of the FileSystem API, and who's calling it?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

IIRC, the scheme is needed because the FilterFileSystem implementation throws an exception for calls to getScheme, so in any case we don't have support for it. Specifically, testMkdirsWithUMask will fail without getScheme for accidental reasons (i.e. there is a little if statement in the published test which checks whether the scheme is s3). But in any case, getScheme should be implemented, I would think... Probably we should have a more direct test of it.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

where is getScheme()? i've looked in 2.2 FilterFileSystem, and FileSystem and can't find it.

/** Convert a path to a File. */
public File pathToFile(Path path){
return ((GlusterVolume) fs).pathToFile(path);
Expand Down
133 changes: 91 additions & 42 deletions src/main/java/org/apache/hadoop/fs/glusterfs/GlusterVolume.java
Original file line number Diff line number Diff line change
Expand Up @@ -26,19 +26,23 @@
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.Comparator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GlusterVolume extends RawLocalFileSystem{


static final Logger log = LoggerFactory.getLogger(GlusterVolume.class);

/**
Expand All @@ -59,13 +63,26 @@ public class GlusterVolume extends RawLocalFileSystem{

public GlusterVolume(){
}

/**
 * Returns the URI scheme of this file system ("glusterfs").
 *
 * Bug fix: {@code NAME.toString()} yields the entire root URI (e.g.
 * "glusterfs:///") rather than just the scheme component; the
 * FileSystem getScheme() contract expects only the scheme (cf. "file"
 * for the local file system). {@code URI.getScheme()} extracts exactly
 * that. Also, NAME is a static member — accessed without {@code this}.
 * TODO(review): NAME's declaration is outside this chunk — confirm it
 * is {@code URI.create("glusterfs:///")} or similar.
 */
public String getScheme(){
    return NAME.getScheme();
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where is getScheme() part of the API? Why is it needed?


/**
 * Creates a volume and immediately applies the supplied configuration.
 *
 * @param conf Hadoop configuration used to initialize this volume
 */
public GlusterVolume(Configuration conf){
    this();
    setConf(conf);
}
public URI getUri() { return NAME; }

/**
 * Creates the directory f (and any missing parents), applying the
 * configured umask to the requested permission before delegating to
 * the superclass.
 *
 * Note: for a unix-accessed file system the umask is not controlled via
 * FS_PERMISSIONS_UMASK_KEY, so "cascading" umasks may occur here.
 *
 * @param f          directory path to create
 * @param permission requested permission, before umask is applied
 * @return true if the directory (tree) was created
 * @throws IOException propagated from the underlying file system
 */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    final FsPermission configuredUmask = FsPermission.getUMask(getConf());
    final FsPermission effective = permission.applyUMask(configuredUmask);
    return super.mkdirs(f, effective);
}

public void setConf(Configuration conf){
log.info("Initializing gluster volume..");
super.setConf(conf);
Expand Down Expand Up @@ -143,25 +160,33 @@ public Path fileToPath(File path) {
return new Path(NAME.toString() + path.toURI().getRawPath().substring(root.length()));
}

public boolean rename(Path src, Path dst) throws IOException {
File dest = pathToFile(dst);

/* two HCFS semantics java.io.File doesn't honor */
if(dest.exists() && dest.isFile() || !(new File(dest.getParent()).exists())) return false;

if (!dest.exists() && pathToFile(src).renameTo(dest)) {
return true;
}
return FileUtil.copy(this, src, this, dst, true, getConf());
}
/**
* Delete the given path to a file or directory.
* @param p the path to delete
* @param recursive to delete sub-directories
* @return true if the file or directory and all its contents were deleted
* @throws IOException if p is non-empty and recursive is false
*/
/**
 * Renames src to dst, enforcing HCFS semantics that java.io.File does
 * not honor on its own.
 *
 * @param src source path; must exist
 * @param dst destination path; must not be an existing file
 * @return true if the rename (or copy fallback) succeeded
 * @throws FileNotFoundException if src does not exist
 *         (passes FsMainOperationsTest#testRenameNonExistantPath)
 * @throws IOException on underlying copy failure
 */
@Override
public boolean rename(Path src, Path dst) throws IOException {
    // Hoisted: pathToFile(src) was previously computed twice, once
    // redundantly re-wrapped as new File(pathToFile(src).toString()).
    File source = pathToFile(src);
    File dest = pathToFile(dst);

    // HCFS semantics: renaming onto an existing file fails rather than
    // silently overwriting.
    if (dest.exists() && dest.isFile())
        return false;

    if (!source.exists()) {
        throw new FileNotFoundException("Rename source does not exist: " + source);
    }

    if (!dest.exists() && source.renameTo(dest)) {
        return true;
    }
    // Fall back to copy+delete (e.g. when a direct rename is not possible).
    return FileUtil.copy(this, src, this, dst, true, getConf());
}

/**
* Delete the given path to a file or directory.
* @param p the path to delete
* @param recursive to delete sub-directories
* @return true if the file or directory and all its contents were deleted
* @throws IOException if p is non-empty and recursive is false
*/
@Override
public boolean delete(Path p, boolean recursive) throws IOException {
File f = pathToFile(p);
if(!f.exists()){
Expand All @@ -178,35 +203,59 @@ public boolean delete(Path p, boolean recursive) throws IOException {

public FileStatus[] listStatus(Path f) throws IOException {
File localf = pathToFile(f);
FileStatus[] results;


//f is a file: returns FileStatus[]{f}
if (!localf.exists()) {
throw new FileNotFoundException("File " + f + " does not exist");
throw new FileNotFoundException("File at path: " + f + " does not exist");
}
GlusterFileStatus gfstat=new GlusterFileStatus(localf, getDefaultBlockSize(), this) ;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why move this up here? creates an extra variable, and also instantiates an object which might not be used (if the conditional below is false).

if (localf.isFile()) {
return new FileStatus[] {
new GlusterFileStatus(localf, getDefaultBlockSize(), this) };
return new FileStatus[] { gfstat};
}

//f is a directory: returns FileStatus[] {f1, f2, f3, ... }
else {
//Patch for testListStatusThrowsExceptionForUnreadableDir , may need to update this after HADOOP-7352 :
if(! gfstat.getPermission().getUserAction().implies(FsAction.READ)){
throw new IOException(
"FileStatus indicates this is an unreadable file! Permissions=" + gfstat.getPermission().toShort() + " / Path=" + gfstat.getPath());
}

FileStatus[] results;
File[] names = localf.listFiles();



if (names == null) {
return null;
}
results = new FileStatus[names.length];
int j = 0;
for (int i = 0; i < names.length; i++) {
try {
results[j] = getFileStatus(fileToPath(names[i]));
j++;
}
catch (FileNotFoundException e) {

File[] names = localf.listFiles();
if (names == null) {
return null;
}
results = new FileStatus[names.length];
int j = 0;
for (int i = 0; i < names.length; i++) {
try {
results[j] = getFileStatus(fileToPath(names[i]));
j++;
} catch (FileNotFoundException e) {
// ignore the files not found since the dir list may have changed
// since the names[] list was generated.
}
}
if (j == names.length) {
return results;
// ignore the files not found since the dir list may have changed
// since the names[] list was generated.
}
}
if(getConf().getBoolean("fs.glusterfs.list_status_sorted", false)){
Arrays.sort(results, new Comparator<FileStatus>(){
public int compare(FileStatus o1,FileStatus o2){
return o1.getPath().getName().compareTo(o2.getPath().getName());
}
});
}

if (j == names.length) {
return results;
}
return Arrays.copyOf(results, j);
}
return Arrays.copyOf(results, j);
}

public FileStatus getFileStatus(Path f) throws IOException {
Expand Down
1 change: 0 additions & 1 deletion src/main/java/org/apache/hadoop/fs/local/GlusterVol.java
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ public class GlusterVol extends RawLocalFsG{

// Convenience constructor: binds this GlusterVol to the default
// GlusterVolume root URI (GlusterVolume.NAME).
// NOTE(review): delegates to a sibling constructor not visible in this
// chunk — confirm its (URI, Configuration) signature.
GlusterVol(final Configuration conf) throws IOException, URISyntaxException {
this(GlusterVolume.NAME, conf);

}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ public FileSystem create(String HcfsClassName) throws IOException{
return hcfs;
} catch (Exception e) {
throw new RuntimeException("Cannont instatiate HCFS. Error:\n " + e);
}
}
}

public FileSystem create() throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ public interface HcfsTestConnectorInterface {

/* return a fully configured instantiated file system for testing */
public FileSystem create() throws IOException;

/* returns a configuration file with properties for a given FS */
public Configuration createConfiguration();

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package org.apache.hadoop.fs.test.connector.glusterfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.glusterfs.GlusterVolume;
import org.apache.hadoop.fs.test.connector.HcfsTestConnector;

Expand All @@ -13,6 +14,13 @@ public Configuration createConfiguration(){
Configuration c = super.createConfiguration();
c.set("fs.glusterfs.mount",System.getProperty("GLUSTER_MOUNT"));
c.set("fs.glusterfs.impl","org.apache.hadoop.fs.local.GlusterFs");
//don't apply a umask, so that the permission tests for mkdir with 777 pass.
c.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"000");

//So that the sorted implementation of testListStatus passes if it runs.
//Note that in newer FSMainOperations tests, testListStatus doesn't require sorted results.
c.set("fs.glusterfs.list_status_sorted", "true");
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

convention is . not _


c.set("fs.default.name","glusterfs:///");
c.setInt("io.file.buffer.size",GlusterVolume.OVERRIDE_WRITE_BUFFER_SIZE );
return c;
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
package org.apache.hadoop.fs.test.unit;

import static org.apache.hadoop.fs.FileSystemTestHelper.getTestRootPath;

import java.io.IOException;

Expand Down Expand Up @@ -44,7 +43,7 @@ public static void after() throws IOException{
}

public Path bufferoutpath(){
return getTestRootPath(fs, "buffering_test"+HCFSPerformanceIOTests.class.getName());
return new Path("/tmp/buffering_test"+HCFSPerformanceIOTests.class.getName());
}

@After
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
package org.apache.hadoop.fs.test.unit;

import static org.apache.hadoop.fs.FileSystemTestHelper.getTestRootPath;

import java.io.File;

import junit.framework.Assert;
Expand Down Expand Up @@ -55,6 +53,6 @@ public void test() throws Exception {

@After
public void tearDown() throws Exception{
fSys.delete(getTestRootPath(fSys, "test"), true);
//fSys.delete(getTestRootPath(fSys, "test"), true);
}
}
Loading