
Commit a6286a9

Merge branch 'master' into super_user
Conflicts: src/main/java/org/apache/hadoop/fs/glusterfs/GlusterVolume.java
2 parents: 2a02746 + 4d76213

18 files changed (+907, −704 lines)

README

Lines changed: 6 additions & 5 deletions
@@ -100,7 +100,7 @@ CONFIGURATION
 The default FileSystem API to use (there is little reason to modify this).
 
 name: fs.default.name
-value: glusterfs://server:port
+value: glusterfs:///
 
 The default name that hadoop uses to represent file as a URI (typically a server:port tuple). Use any host
 in the cluster as the server and any port number. This option has to be in server:port format for hadoop
@@ -115,10 +115,10 @@ CONFIGURATION
 name: fs.glusterfs.mount
 value: /mnt/glusterfs
 
-This is the directory that the plugin will use to mount (FUSE mount) the volume.
+This is the directory where the gluster volume is mounted
 
 name: fs.glusterfs.server
-value: 192.168.1.36, hackme.zugzug.org
+value: localhost
 
 To mount a volume the plugin needs to know the hostname or the IP of a GlusterFS server in the cluster.
 Mention it here.
@@ -180,8 +180,9 @@ The unit tests read test resources from glusterconfig.properties - a file which
 
 1) edit your .bashrc, or else at your terminal run :
 
-export GLUSTER_VOLUME=MyVolume <-- replace with your preferred volume name (default is HadoopVol)
-export GLUSTER_HOST=192.0.1.2 <-- replace with your host (default will be determined at runtime in the JVM)
+export GLUSTER_MOUNT=/mnt/glusterfs
+export HCFS_FILE_SYSTEM_CONNECTOR=org.apache.hadoop.hcfs.test.connector.glusterfs.GlusterFileSystemTestConnector
+export HCFS_CLASSNAME=org.apache.hadoop.fs.glusterfs.GlusterFileSystem
 
 2) run:
 mvn package
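
The README settings above map directly onto Hadoop's Configuration API. Below is a minimal sketch (not part of this commit) that sets the documented properties and opens the volume; the fs.glusterfs.impl binding is an assumption based on Hadoop 1.x's fs.<scheme>.impl convention, not something this diff shows.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class GlusterFsExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.default.name", "glusterfs:///");      // default FileSystem URI (per the README)
            conf.set("fs.glusterfs.mount", "/mnt/glusterfs");  // where the gluster volume is mounted
            conf.set("fs.glusterfs.server", "localhost");      // any GlusterFS server in the cluster
            // Assumption: scheme-to-class binding via Hadoop 1.x's fs.<scheme>.impl convention.
            conf.set("fs.glusterfs.impl", "org.apache.hadoop.fs.glusterfs.GlusterFileSystem");

            FileSystem fs = FileSystem.get(conf);
            System.out.println(fs.listStatus(new Path("/")).length + " entries at volume root");
        }
    }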

pom.xml

Lines changed: 16 additions & 9 deletions
@@ -11,7 +11,8 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.4</version>
+      <version>4.9</version>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -47,21 +48,27 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.14</version>
+        <version>2.4.3</version>
         <configuration>
           <!--
-            run "export GLUSTER_HOST=192.X.X.1
-            export GLUSTER_VOLUME=volname"
+            run "export GLUSTER_MOUNT=/mnt/glusterfs
+            export HCFS_FILE_SYSTEM_CONNECTOR=org.gluster.test.GlusterFileSystemTestConnector
+            export HCFS_CLASSNAME=org.apache.hadoop.fs.glusterfs.GlusterFileSystem"
           -->
           <systemProperties>
             <property>
-              <name>GLUSTER_HOST</name>
-              <value>${GLUSTER_HOST}</value>
+              <name>GLUSTER_MOUNT</name>
+              <value>${GLUSTER_MOUNT}</value>
             </property>
-            <property>
-              <name>GLUSTER_VOLUME</name>
-              <value>${GLUSTER_VOLUME}</value>
+            <property>
+              <name>HCFS_CLASSNAME</name>
+              <value>${HCFS_CLASSNAME}</value>
+            </property>
+            <property>
+              <name>HCFS_FILE_SYSTEM_CONNECTOR</name>
+              <value>${HCFS_FILE_SYSTEM_CONNECTOR}</value>
             </property>
+
           </systemProperties>
         </configuration>
       </plugin>
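
The surefire <systemProperties> block forwards the exported shell variables into the test JVM. A hedged sketch (not from the commit; class name is illustrative) of how test code sees them:

    public class SurefirePropsExample {
        public static void main(String[] args) {
            // Values arrive via surefire's <systemProperties> pass-through.
            String mount = System.getProperty("GLUSTER_MOUNT");            // e.g. /mnt/glusterfs
            String connector = System.getProperty("HCFS_FILE_SYSTEM_CONNECTOR");
            String fsClass = System.getProperty("HCFS_CLASSNAME");         // FileSystem class under test
            System.out.println(mount + " | " + connector + " | " + fsClass);
        }
    }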

pom.xml-1.2.1

Lines changed: 179 additions & 0 deletions
@@ -0,0 +1,179 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop.fs.glusterfs</groupId>
+  <artifactId>glusterfs</artifactId>
+  <packaging>jar</packaging>
+  <version>2.0-SNAPSHOT</version>
+  <name>glusterfs</name>
+  <url>http://maven.apache.org</url>
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.11</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-core</artifactId>
+      <version>1.2.1</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-test</artifactId>
+      <version>1.2.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-test</artifactId>
+      <version>1.2.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.5.8</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>1.7.3</version>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>2.14</version>
+        <configuration>
+          <!--
+            run "export GLUSTER_MOUNT=/mnt/glusterfs
+            export HCFS_FILE_SYSTEM_CONNECTOR=org.gluster.test.GlusterFileSystemTestConnector
+            export HCFS_CLASSNAME=org.apache.hadoop.fs.glusterfs.GlusterFileSystem"
+          -->
+          <systemProperties>
+            <property>
+              <name>GLUSTER_MOUNT</name>
+              <value>${GLUSTER_MOUNT}</value>
+            </property>
+            <property>
+              <name>HCFS_CLASSNAME</name>
+              <value>${HCFS_CLASSNAME}</value>
+            </property>
+            <property>
+              <name>HCFS_FILE_SYSTEM_CONNECTOR</name>
+              <value>${HCFS_FILE_SYSTEM_CONNECTOR}</value>
+            </property>
+
+          </systemProperties>
+        </configuration>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>2.3.2</version>
+        <configuration>
+          <source>1.5</source>
+          <target>1.5</target>
+        </configuration>
+      </plugin>
+
+      <plugin>
+        <groupId>pl.project13.maven</groupId>
+        <artifactId>git-commit-id-plugin</artifactId>
+        <version>2.1.4</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>revision</goal>
+            </goals>
+          </execution>
+        </executions>
+
+        <configuration>
+          <!-- that's the default value, you don't have to set it -->
+          <prefix>git</prefix>
+
+          <!-- that's the default value -->
+          <dateFormat>dd.MM.yyyy '@' HH:mm:ss z</dateFormat>
+
+          <!-- true is default here, it prints some more information during the build -->
+          <verbose>true</verbose>
+
+          <!--
+            If you'd like to tell the plugin where your .git directory is,
+            use this setting, otherwise we'll perform a search trying to
+            figure out the right directory. It's better to add it explicite IMHO.
+          -->
+          <dotGitDirectory>${project.basedir}/.git</dotGitDirectory>
+
+          <!-- ALTERNATE SETUP - GENERATE FILE -->
+          <!--
+            If you want to keep git information, even in your WAR file etc,
+            use this mode, which will generate a properties file (with filled out values)
+            which you can then normally read using new Properties().load(/**/)
+          -->
+
+          <!--
+            this is true by default; You may want to set this to false, if the plugin should run inside a
+            <packaging>pom</packaging> project. Most projects won't need to override this property.
+
+            For an use-case for this kind of behaviour see: https://github.com/ktoso/maven-git-commit-id-plugin/issues/21
+          -->
+          <skipPoms>true</skipPoms>
+
+          <!-- this is false by default, forces the plugin to generate the git.properties file -->
+          <generateGitPropertiesFile>true</generateGitPropertiesFile>
+
+          <!-- The path for the to be generated properties file, it's relative to ${project.basedir} -->
+          <generateGitPropertiesFilename>src/main/resources/git.properties</generateGitPropertiesFilename>
+
+          <!-- true by default, controls whether the plugin will fail when no .git directory is found, when set to false the plugin will just skip execution -->
+          <!-- @since 2.0.4 -->
+          <failOnNoGitDirectory>false</failOnNoGitDirectory>
+
+          <!-- @since 2.1.0 -->
+          <!--
+            read up about git-describe on the in man, or it's homepage - it's a really powerful versioning helper
+            and the recommended way to use git-commit-id-plugin. The configuration bellow is optional,
+            by default describe will run "just like git-describe on the command line", even though it's a JGit reimplementation.
+          -->
+          <gitDescribe>
+            <!-- This will show the available tags-->
+            <tags>true</tags>
+
+            <!-- don't generate the describe property -->
+            <skip>false</skip>
+            <!--
+              if no tag was found "near" this commit, just print the commit's id instead,
+              helpful when you always expect this field to be not-empty
+            -->
+            <always>false</always>
+            <!--
+              how many chars should be displayed as the commit object id?
+              7 is git's default,
+              0 has a special meaning (see end of this README.md),
+              and 40 is the maximum value here
+            -->
+            <abbrev>7</abbrev>
+
+            <!-- when the build is triggered while the repo is in "dirty state", append this suffix -->
+            <dirty>-dirty</dirty>
+
+            <!--
+              always print using the "tag-commits_from_tag-g_commit_id-maybe_dirty" format, even if "on" a tag.
+              The distance will always be 0 if you're "on" the tag.
+            -->
+            <forceLongFormat>false</forceLongFormat>
+          </gitDescribe>
+        </configuration>
+
+      </plugin>
+
+    </plugins>
+  </build>
+</project>
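
With generateGitPropertiesFile enabled, the build writes git.properties onto the classpath. A minimal sketch (not part of this commit) of reading it at runtime, assuming the plugin's documented git.* key names and the configured "git" prefix:

    import java.io.InputStream;
    import java.util.Properties;

    public class GitPropsExample {
        public static void main(String[] args) throws Exception {
            // git.properties is generated into src/main/resources and ends up on the classpath.
            InputStream in = GitPropsExample.class.getResourceAsStream("/git.properties");
            Properties git = new Properties();
            git.load(in);
            in.close();
            // "git" is the configured <prefix>; abbreviated ids honor <abbrev>7</abbrev>.
            System.out.println(git.getProperty("git.commit.id.abbrev"));
        }
    }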

src/main/java/org/apache/hadoop/fs/glusterfs/GlusterVolume.java

Lines changed: 58 additions & 12 deletions
@@ -30,6 +30,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.slf4j.Logger;
@@ -38,6 +39,7 @@
 
 public class GlusterVolume extends RawLocalFileSystem{
 
+
     static final Logger log = LoggerFactory.getLogger(GlusterFileSystemCRC.class);
     public static final URI NAME = URI.create("glusterfs:///");
 
@@ -85,6 +87,11 @@ public void setConf(Configuration conf){
         superUser = conf.get("gluster.daemon.user", null);
 
         aclFilter = new AclPathFilter(conf);
+
+        /* ensure the initial working directory exists */
+        Path workingDirectory = getInitialWorkingDirectory();
+        mkdirs(workingDirectory);
+
         //volName=conf.get("fs.glusterfs.volname", null);
         //remoteGFSServer=conf.get("fs.glusterfs.server", null);
 
@@ -94,21 +101,60 @@ public void setConf(Configuration conf){
         }
 
     }
-
+
     public File pathToFile(Path path) {
-        String pathString = path.toUri().getRawPath();
-
-        if(pathString.startsWith(Path.SEPARATOR)){
-            pathString = pathString.substring(1);
-        }
-
-        return new File(root + Path.SEPARATOR + pathString);
+        checkPath(path);
+        if (!path.isAbsolute()) {
+            path = new Path(getWorkingDirectory(), path);
+        }
+        return new File(root + path.toUri().getPath());
     }
-
-    public Path fileToPath(File path) {
+
+    @Override
+    protected Path getInitialWorkingDirectory() {
+        /* apache's unit tests use a default working direcotry like this: */
+        return new Path(this.NAME + "user/" + System.getProperty("user.name"));
+        /* The super impl returns the users home directory in unix */
+        //return super.getInitialWorkingDirectory();
+    }
+
+    public Path fileToPath(File path) {
         return new Path(NAME.toString() + path.toURI().getRawPath().substring(root.length()));
-    }
-
+    }
+
+    public boolean rename(Path src, Path dst) throws IOException {
+        File dest = pathToFile(dst);
+
+        /* two HCFS semantics java.io.File doesn't honor */
+        if(dest.exists() && dest.isFile() || !(new File(dest.getParent()).exists())) return false;
+
+        if (!dest.exists() && pathToFile(src).renameTo(dest)) {
+            return true;
+        }
+        return FileUtil.copy(this, src, this, dst, true, getConf());
+    }
+    /**
+     * Delete the given path to a file or directory.
+     * @param p the path to delete
+     * @param recursive to delete sub-directories
+     * @return true if the file or directory and all its contents were deleted
+     * @throws IOException if p is non-empty and recursive is false
+     */
+    @Override
+    public boolean delete(Path p, boolean recursive) throws IOException {
+        File f = pathToFile(p);
+        if(!f.exists()){
+            /* HCFS semantics expect 'false' if attempted file deletion on non existent file */
+            return false;
+        }else if (f.isFile()) {
+            return f.delete();
+        } else if (!recursive && f.isDirectory() &&
+                (FileUtil.listFiles(f).length != 0)) {
+            throw new IOException("Directory " + f.toString() + " is not empty");
+        }
+        return FileUtil.fullyDelete(f);
+    }
+
     public FileStatus[] listStatus(Path f) throws IOException {
         File localf = pathToFile(f);
         FileStatus[] results;
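
The overridden rename() and delete() encode HCFS contract points that java.io.File alone does not honor: deleting a missing path returns false, deleting a non-empty directory non-recursively throws, and renaming onto an existing file or into a missing parent returns false. A hedged usage sketch of the observable behavior (class name and paths are illustrative, not from the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HcfsSemanticsExample {
        public static void main(String[] args) throws Exception {
            // Assumes fs.default.name points at a configured glusterfs:/// instance.
            FileSystem fs = FileSystem.get(new Configuration());

            // delete() on a missing path returns false rather than throwing.
            boolean gone = fs.delete(new Path("/does/not/exist"), false);

            // rename() returns false when the destination parent is missing
            // or the destination is an existing file.
            boolean moved = fs.rename(new Path("/srcFile"), new Path("/missingParent/dst"));

            System.out.println("delete=" + gone + ", rename=" + moved);
        }
    }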
org/apache/hadoop/fs/test/connector/HcfsTestConnector.java

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+package org.apache.hadoop.fs.test.connector;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+/*
+ * Generic HCFS file system test connector.
+ * This test connector takes a fully qualified o.a.h.f.FileSystem implementor class
+ * as an environment variable.
+ *
+ */
+public class HcfsTestConnector implements HcfsTestConnectorInterface{
+
+    public Configuration createConfiguration(){
+        return new Configuration();
+    }
+
+    public FileSystem create(String HcfsClassName) throws IOException{
+        try {
+            FileSystem hcfs = (FileSystem)Class.forName(HcfsClassName).newInstance();
+            hcfs.initialize(hcfs.getUri(), createConfiguration());
+            return hcfs;
+        } catch (Exception e) {
+            throw new RuntimeException("Cannont instatiate HCFS. Error:\n " + e);
+        }
+    }
+
+    public FileSystem create() throws IOException {
+        return create(System.getProperty("HCFS_CLASSNAME"));
+    }
+}
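
A hedged usage sketch for the new connector (the smoke-test path and class name are illustrative), assuming HCFS_CLASSNAME is set as in the surefire configuration above:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.test.connector.HcfsTestConnector;

    public class ConnectorExample {
        public static void main(String[] args) throws Exception {
            // create() reflectively instantiates the class named by -DHCFS_CLASSNAME=...
            FileSystem fs = new HcfsTestConnector().create();
            fs.mkdirs(new Path("/tmp/hcfs-smoke-test"));  // illustrative path
            System.out.println("connected to " + fs.getUri());
        }
    }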
