Skip to content

Commit b2247a7

Browse files
committed
Merge pull request #68 from jayunit100/default_buffer_pr
Bug 812924 : Default Buffering + Unit Test to prove that, given bad defa...
2 parents 4563124 + c28fb43 commit b2247a7

File tree

4 files changed

+135
-20
lines changed

4 files changed

+135
-20
lines changed

src/main/java/org/apache/hadoop/fs/glusterfs/GlusterVolume.java

Lines changed: 27 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -33,14 +33,22 @@
3333
import org.apache.hadoop.fs.FileUtil;
3434
import org.apache.hadoop.fs.Path;
3535
import org.apache.hadoop.fs.RawLocalFileSystem;
36+
import org.apache.hadoop.fs.permission.FsPermission;
3637
import org.slf4j.Logger;
3738
import org.slf4j.LoggerFactory;
38-
import org.apache.hadoop.fs.permission.FsPermission;
3939

4040
public class GlusterVolume extends RawLocalFileSystem{
4141

42+
static final Logger log = LoggerFactory.getLogger(GlusterVolume.class);
4243

43-
static final Logger log = LoggerFactory.getLogger(GlusterFileSystemCRC.class);
44+
/**
45+
* General reason for these constants is to help us decide
46+
* when to override the specified buffer size. See implementation
47+
* of logic below, which might change over time.
48+
*/
49+
public static final int OVERRIDE_WRITE_BUFFER_SIZE = 1024 * 4;
50+
public static final int OPTIMAL_WRITE_BUFFER_SIZE = 1024 * 128;
51+
4452
public static final URI NAME = URI.create("glusterfs:///");
4553

4654
protected String root=null;
@@ -85,24 +93,27 @@ public void setConf(Configuration conf){
8593
if(!exists(mapredSysDirectory)){
8694
mkdirs(mapredSysDirectory);
8795
}
88-
89-
superUser = conf.get("gluster.daemon.user", null);
90-
log.info("Gluster Daemon for ACLs is: " + superUser);
91-
96+
//ACL setup
9297
aclFilter = new AclPathFilter(conf);
93-
94-
/* ensure the initial working directory exists */
95-
final Path workingDirectory = getInitialWorkingDirectory();
98+
superUser = conf.get("gluster.daemon.user", null);
99+
log.info("mapreduce/superuser daemon : " + superUser);
100+
101+
//Working directory setup
102+
Path workingDirectory = getInitialWorkingDirectory();
96103
mkdirs(workingDirectory);
97-
//For hadoop < 1.2.0, when RawLocalFileSystem used
98-
//user.dir instead of user.home
99104
setWorkingDirectory(workingDirectory);
100-
101105
log.info("Working directory is : "+ getWorkingDirectory());
102-
//volName=conf.get("fs.glusterfs.volname", null);
103-
//remoteGFSServer=conf.get("fs.glusterfs.server", null);
104-
105-
}catch (Exception e){
106+
107+
/**
108+
* Write Buffering
109+
*/
110+
Integer userBufferSize=conf.getInt("io.file.buffer.size", -1);
111+
if(userBufferSize == OVERRIDE_WRITE_BUFFER_SIZE || userBufferSize == -1) {
112+
conf.setInt("io.file.buffer.size", OPTIMAL_WRITE_BUFFER_SIZE);
113+
}
114+
log.info("Write buffer size : " +conf.getInt("io.file.buffer.size",-1)) ;
115+
}
116+
catch (Exception e){
106117
throw new RuntimeException(e);
107118
}
108119
}

src/test/java/org/apache/hadoop/fs/test/connector/glusterfs/GlusterFileSystemTestConnector.java

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,20 @@
11
package org.apache.hadoop.fs.test.connector.glusterfs;
22

33
import org.apache.hadoop.conf.Configuration;
4+
import org.apache.hadoop.fs.glusterfs.GlusterVolume;
45
import org.apache.hadoop.fs.test.connector.HcfsTestConnector;
56

67
/**
78
* A HCFS test connector specifically for instantiation and testing Glusterfs.
89
*/
910
public class GlusterFileSystemTestConnector extends HcfsTestConnector{
10-
11-
public Configuration createConfiguration(){
11+
12+
public Configuration createConfiguration(){
1213
Configuration c = super.createConfiguration();
1314
c.set("fs.glusterfs.mount",System.getProperty("GLUSTER_MOUNT"));
1415
c.set("fs.glusterfs.impl","org.apache.hadoop.fs.local.GlusterFs");
1516
c.set("fs.default.name","glusterfs:///");
17+
c.setInt("io.file.buffer.size",GlusterVolume.OVERRIDE_WRITE_BUFFER_SIZE );
1618
return c;
1719
}
1820

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
package org.apache.hadoop.fs.test.unit;

import static org.apache.hadoop.fs.FileSystemTestHelper.getTestRootPath;

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.glusterfs.GlusterVolume;
import org.apache.hadoop.fs.test.connector.HcfsTestConnectorFactory;
import org.apache.hadoop.fs.test.connector.HcfsTestConnectorInterface;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A class for performance and IO related unit tests.
 *
 * - Write buffering
 * - Read buffering
 * - Object caching / File lookup caching.
 * - Seeking
 */
public class HCFSPerformanceIOTests {

    static FileSystem fs;
    Logger log = LoggerFactory.getLogger(HCFSPerformanceIOTests.class);

    // Chunk of content repeatedly appended to the file under test.
    static final String CONTENT = "1234";

    @BeforeClass
    public static void setup() throws Exception {
        HcfsTestConnectorInterface connector = HcfsTestConnectorFactory.getHcfsTestConnector();
        fs = connector.create();
    }

    @AfterClass
    public static void after() throws IOException {
        fs.close();
    }

    /** Path written by the buffering test; name is unique to this class. */
    public Path bufferoutpath() {
        return getTestRootPath(fs, "buffering_test" + HCFSPerformanceIOTests.class.getName());
    }

    @After
    public void tearDown() throws Exception {
        fs.delete(bufferoutpath(), true);
    }

    /**
     * This is a complex test. It documents the expected behaviour of the
     * FileSystem buffering.
     *
     * It assumes that the configuration value of FS is == the {@link GlusterVolume}
     * OVERRIDE_WRITE_BUFFER_SIZE. Then, it starts writing to a stream and asserts
     * that nothing reaches the file until the optimal buffer size is exceeded.
     */
    @Test
    public void testBufferSpill() throws Exception {

        /*
         * Sanity check: this test expects that an override is being performed,
         * i.e. that the buffer size is set to the optimal size, because the
         * file system detected that the originally configured buffer size was
         * equal to the "bad default" value which we have decided to override,
         * for the sake of "reasonable defaults" out of the box.
         */
        Assert.assertEquals(
                GlusterVolume.OPTIMAL_WRITE_BUFFER_SIZE,
                fs.getConf().getInt("io.file.buffer.size", -1));

        // try-with-resources: the stream is closed even if an assertion below
        // fails mid-loop (the original leaked the stream in that case).
        try (FSDataOutputStream os = fs.create(bufferoutpath())) {

            int written = 0;

            // Assert that no data is spilled to disk until we reach the optimal size.
            while (written < GlusterVolume.OPTIMAL_WRITE_BUFFER_SIZE) {
                os.write(CONTENT.getBytes());
                written += CONTENT.getBytes().length;
                Assert.assertTrue("asserting that file not written yet...",
                        fs.getLength(bufferoutpath()) == 0);
            }
            os.flush();

            Assert.assertTrue("asserting that is now written... ",
                    fs.getLength(bufferoutpath()) >= GlusterVolume.OPTIMAL_WRITE_BUFFER_SIZE);
        }
    }
}

src/test/java/org/apache/hadoop/fs/test/unit/HcfsFileSystemTest.java

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525

2626
package org.apache.hadoop.fs.test.unit;
2727

28+
import static org.apache.hadoop.fs.FileSystemTestHelper.getTestRootPath;
2829
import static org.junit.Assert.assertEquals;
2930
import static org.junit.Assert.assertFalse;
3031
import static org.junit.Assert.assertTrue;
@@ -40,6 +41,7 @@
4041
import org.apache.hadoop.fs.permission.FsPermission;
4142
import org.apache.hadoop.fs.test.connector.HcfsTestConnectorFactory;
4243
import org.apache.hadoop.fs.test.connector.HcfsTestConnectorInterface;
44+
import org.junit.After;
4345
import org.junit.AfterClass;
4446
import org.junit.Assert;
4547
import org.junit.BeforeClass;
@@ -62,9 +64,13 @@ public static void setup() throws Exception {
6264
@AfterClass
6365
public static void after() throws IOException{
6466
fs.close();
65-
6667
}
6768

69+
@After
70+
public void tearDown() throws Exception {
71+
fs.delete(getTestRootPath(fs, "test"),true);
72+
}
73+
6874
@org.junit.Test
6975
public void testTolerantMkdirs() throws Exception{
7076
Path longPath=new Path("a/b/c/d");
@@ -355,4 +361,4 @@ public void testPermissionsChanging() throws Exception{
355361

356362
}
357363

358-
}
364+
}

0 commit comments

Comments
 (0)