diff --git a/pom.xml b/pom.xml
index 0b788fa5..bcda9a8c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -9,7 +9,40 @@
2.1.6
glusterfs-hadoop
http://maven.apache.org
+
+
+
+
+      <repository>
+        <id>libgfapi-java-release-repo</id>
+        <name>libgfapi-java Release Repository</name>
+        <url>http://snuffy.s3-website-us-east-1.amazonaws.com/release</url>
+        <releases>
+          <enabled>true</enabled>
+        </releases>
+        <snapshots>
+          <enabled>false</enabled>
+        </snapshots>
+      </repository>
+      <repository>
+        <id>libgfapi-java-s3-snapshot-repo</id>
+        <name>libgfapi-java Snapshot Repository</name>
+        <url>http://snuffy.s3-website-us-east-1.amazonaws.com/snapshot</url>
+        <releases>
+          <enabled>false</enabled>
+        </releases>
+        <snapshots>
+          <enabled>true</enabled>
+        </snapshots>
+      </repository>
+
+
+
+
+      <groupId>glusterfs</groupId>
+      <artifactId>libgfapi-java-io</artifactId>
+      <version>1.0-SNAPSHOT</version>
+
junit
junit
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFSBrickClass.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFSBrickClass.java
new file mode 100644
index 00000000..4413bb28
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFSBrickClass.java
@@ -0,0 +1,104 @@
+/**
+ *
+ * Copyright (c) 2011 Gluster, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.*;
+
/**
 * Describes one GlusterFS brick ("host:/exported/path") that holds the byte
 * region [start, end) of a (possibly striped) file.
 */
public class GlusterFSBrickClass{
    String host;          // server exporting the brick
    String exportedFile;  // brick-local path of the file
    long start;           // first byte of the region held by this brick
    long end;             // one past the last byte of the region (start + len)
    boolean isChunked;    // true when the file is striped across bricks
    int stripeSize;       // Stripe size in bytes
    int nrStripes;        // number of stripes
    int switchCount;      // for SR, DSR - number of replicas of each stripe
                          // -1 for others

    /**
     * @param brick brick descriptor of the form "host:/exported/path"
     * @throws IOException when the descriptor is malformed
     */
    public GlusterFSBrickClass(String brick, long start, long len, boolean flag, int stripeSize, int nrStripes, int switchCount) throws IOException{
        this.host=brick2host(brick);
        this.exportedFile=brick2file(brick);
        this.start=start;
        this.end=start+len;
        this.isChunked=flag;
        this.stripeSize=stripeSize;
        this.nrStripes=nrStripes;
        this.switchCount=switchCount;
    }

    public boolean isChunked(){
        return isChunked;
    }

    /**
     * Returns the brick-local file path when this brick lives on
     * {@code hostname}, otherwise null.
     */
    public String brickIsLocal(String hostname){
        return host.equals(hostname) ? exportedFile : null;
    }

    /**
     * Maps a byte range onto the stripe layout.
     *
     * @return {startNodeInTree, endNodeInTree, usableLen}; usableLen is len
     *         when the range stays inside one stripe, otherwise the number of
     *         bytes up to the end of the stripe containing {@code start}.
     */
    public int[] getBrickNumberInTree(long start,int len){
        long end=len;
        int startNodeInTree=((int) (start/stripeSize))%nrStripes;
        int endNodeInTree=((int) ((start+len)/stripeSize))%nrStripes;

        if(startNodeInTree!=endNodeInTree){
            // range crosses a stripe boundary: clip it to the end of the
            // stripe that contains 'start'
            end=(start-(start%stripeSize))+stripeSize;
            end-=start;
        }

        return new int[]{startNodeInTree,endNodeInTree,(int) end};
    }

    /**
     * True when the brick at slot {@code nodeLoc} carries a replica of stripe
     * {@code nodeInTree}.
     */
    public boolean brickHasFilePart(int nodeInTree,int nodeLoc){
        if(switchCount==-1)
            return (nodeInTree==nodeLoc);

        // with replication, stripe i occupies slots
        // [i*switchCount, (i+1)*switchCount)
        nodeInTree*=switchCount;
        for(int i=nodeInTree;i<(nodeInTree+switchCount);i++){
            if(i==nodeLoc)
                return true;
        }

        return false;
    }

    /** Extracts the host part of a "host:path" brick descriptor. */
    public String brick2host(String brick) throws IOException{
        return splitBrick(brick)[0];
    }

    /** Extracts the exported-path part of a "host:path" brick descriptor. */
    public String brick2file(String brick) throws IOException{
        return splitBrick(brick)[1];
    }

    // Shared parser for "host:path" descriptors. Previously duplicated in
    // brick2host and brick2file, with brick2file raising a misleading
    // "Error getting hostname" message.
    private String[] splitBrick(String brick) throws IOException{
        String[] hf=brick.split(":");
        if(hf.length!=2)
            throw new IOException("Brick '"+brick+"' not of the form host:path");
        return hf;
    }

}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFSBrickRepl.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFSBrickRepl.java
new file mode 100644
index 00000000..c24eecbd
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFSBrickRepl.java
@@ -0,0 +1,50 @@
+/**
+ *
+ * Copyright (c) 2011 Gluster, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
/**
 * Collects the hosts that hold replicas of one file region
 * [start, start + len).
 */
public class GlusterFSBrickRepl{
    private String[] replHost; // one slot per expected replica
    private long start;        // offset of the region within the file
    private long len;          // length of the region
    private int cnt;           // number of hosts recorded so far

    public GlusterFSBrickRepl(int replCount, long start, long len){
        this.replHost=new String[replCount];
        this.start=start;
        this.len=len;
        this.cnt=0;
    }

    /**
     * Records the next replica host.
     *
     * @throws ArrayIndexOutOfBoundsException with a descriptive message when
     *         more hosts are added than the replica count given at
     *         construction (previously failed with a bare index message)
     */
    public void addHost(String host){
        if(cnt>=replHost.length)
            throw new ArrayIndexOutOfBoundsException(
                "Too many replica hosts: capacity is "+replHost.length);
        this.replHost[cnt++]=host;
    }

    /** Returns the backing host array; unfilled slots are null. */
    public String[] getReplHosts(){
        return this.replHost;
    }

    /** Returns the region's start offset. */
    public long getStartLen(){
        return this.start;
    }

    /** Returns the region's length. */
    public long getOffLen(){
        return this.len;
    }
}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFileStatus.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFileStatus.java
new file mode 100644
index 00000000..1cac77d8
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFileStatus.java
@@ -0,0 +1,58 @@
+/**
+ *
+ * Copyright (c) 2011 Red Hat, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * Implements the Hadoop FileSystem Interface to allow applications to store
+ * files on GlusterFS and run Map/Reduce jobs on the data.
+ *
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.util.StringTokenizer;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+import org.gluster.fs.GlusterFile;
+/*
+ * Copied from org.apache.fs.RawLocalFileSystem.RawFileStatus
+ */
+public class GlusterFileStatus extends FileStatus{
+
+
+
+ public GlusterFileStatus(GlusterFile file, GlusterfsVolume vol) {
+ super(file.length(),
+ file.isDirectory(),
+ 0, // repliation count
+ file.getBlockSize(),
+ file.getMtime(),
+ file.getAtime(),
+ new FsPermission(new Long(file.getMod()).shortValue()),
+ IdLookup.getName(new Long(file.getUid()).intValue()),
+ IdLookup.getName(new Long(file.getGid()).intValue()),
+ null, // path to link
+ vol.makeQualified(new Path(file.getPath())));
+
+ }
+}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFileSystem.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFileSystem.java
new file mode 100644
index 00000000..ffce62dc
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFileSystem.java
@@ -0,0 +1,89 @@
+/**
+ *
+ * Copyright (c) 2013 Red Hat, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ */
+
+/**
+ * Implements the Hadoop FileSystem Interface to allow applications to store
+ * files on GlusterFS and run Map/Reduce jobs on the data. This code does NOT perform a CRC
+ * on files.
+ *
+ * gluster file systems are specified with the glusterfs:// prefix.
+ *
+ *
+ */
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
 * Hadoop 1.x-style FileSystem facade that filters all operations through a
 * {@link GlusterfsVolume}. CRC files are intentionally not produced.
 */
public class GlusterFileSystem extends FilterFileSystem{

    protected static final Logger log=LoggerFactory.getLogger(GlusterFileSystem.class);

    /** Wraps a fresh GlusterfsVolume and logs build (git) information. */
    public GlusterFileSystem(){
        super(new GlusterfsVolume());
        Version v=new Version();
        log.info("Initializing GlusterFS, CRC disabled.");
        log.info("GIT INFO="+v);
        log.info("GIT_TAG="+v.getTag());
    }

    /**
     * Get file status.
     */
    // Delegates the existence check to the wrapped volume.
    public boolean exists(Path f) throws IOException{
        return getRawFileSystem().exists(f);
    }

    /** Logs, then hands the configuration to the wrapped file system. */
    public void setConf(Configuration conf){
        log.info("Configuring GlusterFS");
        super.setConf(conf);
    }

    /*
     * if GlusterFileSystem is the default filesystem, real local URLs come back
     * without a file:/ scheme name (BUG!). the glusterfs file system is
     * assumed. force a schema.
     */

    // NOTE(review): prefixing "file:/" assumes 'src' is an absolute local
    // path; behavior for relative paths should be confirmed.
    public void copyFromLocalFile(boolean delSrc,Path src,Path dst) throws IOException{
        FileSystem srcFs=new Path("file:/"+src.toString()).getFileSystem(getConf());
        FileSystem dstFs=dst.getFileSystem(getConf());
        FileUtil.copy(srcFs, src, dstFs, dst, delSrc, getConf());
    }

    // Same scheme-forcing workaround, applied to the destination side.
    public void copyToLocalFile(boolean delSrc,Path src,Path dst) throws IOException{
        FileSystem srcFs=src.getFileSystem(getConf());
        FileSystem dstFs=new Path("file:/"+dst.toString()).getFileSystem(getConf());
        FileUtil.copy(srcFs, src, dstFs, dst, delSrc, getConf());
    }

    public String toString(){
        return "Gluster File System, no CRC.";
    }
}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFs.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFs.java
new file mode 100644
index 00000000..61e8f492
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFs.java
@@ -0,0 +1,51 @@
+/**
+ *
+ * Copyright (c) 2013 Red Hat, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ */
+
+/**
+ * Implements the Hadoop FileSystem 2.x Interface to allow applications to store
+ * files on GlusterFS and run Map/Reduce jobs on the data. This code does not perform a CRC
+ * on the files.
+ *
+ * gluster file systems are specified with the glusterfs:// prefix.
+ *
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FilterFs;
+
/**
 * Hadoop 2.x AbstractFileSystem adapter: filters all FileContext operations
 * through a {@link GlusterVol}.
 */
public class GlusterFs extends FilterFs{

    GlusterFs(Configuration conf) throws IOException, URISyntaxException{
        super(new GlusterVol(conf));
    }

    /**
     * Signature required by AbstractFileSystem.createFileSystem(URI,
     * Configuration). The URI is ignored; the volume comes from the
     * configuration.
     */
    GlusterFs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException{
        this(conf);
    }

}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFsStatus.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFsStatus.java
new file mode 100644
index 00000000..cbbc7761
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterFsStatus.java
@@ -0,0 +1,19 @@
+package org.apache.hadoop.fs.libgfsio;
+
+import org.apache.hadoop.fs.FsStatus;
+import org.gluster.fs.GlusterVolume;
+
/** FsStatus populated from a libgfapi GlusterVolume. */
public class GlusterFsStatus extends FsStatus{

    /** Construct a FsStatus object, using the specified statistics */
    public GlusterFsStatus(long capacity, long used, long remaining) {
        super(capacity,used,remaining);
    }

    /** Derives the numbers from the volume: used = total size - free. */
    public GlusterFsStatus(GlusterVolume vol) {
        super( vol.getSize(),vol.getSize()-vol.getFree(),vol.getFree());
    }

}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterVol.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterVol.java
new file mode 100644
index 00000000..2bad4b8d
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterVol.java
@@ -0,0 +1,152 @@
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.fs.local.LocalConfigKeys;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
+
/**
 * Hadoop 2.x AbstractFileSystem for the "glusterfs" scheme, delegating to a
 * {@link GlusterfsVolume}.
 *
 * NOTE(review): the symlink-related methods below appear to be copied from
 * Hadoop's local RawLocalFs: they shell out to the local OS and check for the
 * "file" scheme, which only makes sense if the gluster volume is also visible
 * as a local mount — confirm this is intended for a libgfapi-backed FS.
 */
public class GlusterVol extends DelegateToFileSystem {

    GlusterVol(final Configuration conf) throws IOException, URISyntaxException {
        this(GlusterfsVolume.NAME, conf);

    }

    /**
     * This constructor has the signature needed by
     * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}.
     *
     * @param theUri
     *            which must be that of localFs
     * @param conf
     * @throws IOException
     * @throws URISyntaxException
     */
    GlusterVol(final URI theUri, final Configuration conf) throws IOException,
            URISyntaxException {
        super(theUri, new GlusterfsVolume(), conf, "glusterfs", false);
    }

    GlusterVol(final URI theUri, final FileSystem fs, final Configuration conf,
            boolean authorityRequired) throws IOException, URISyntaxException {
        super(theUri, fs, conf, theUri.getScheme(), authorityRequired);
    }

    @Override
    public int getUriDefaultPort() {
        // NOTE(review): comment inherited from the local-FS original; there is
        // likewise no default port for glusterfs:///
        return -1; // No default port for file:///
    }

    @Override
    public FsServerDefaults getServerDefaults() throws IOException {
        // Reuses the local-FS server defaults (block size, buffer size, ...)
        return LocalConfigKeys.getServerDefaults();
    }

    @Override
    public boolean supportsSymlinks() {
        return true;
    }

    // NOTE(review): rejects non-"file" targets and creates the link with the
    // local 'ln -s' — only valid where the paths resolve on the local host.
    @Override
    public void createSymlink(Path target, Path link, boolean createParent)
            throws IOException {
        final String targetScheme = target.toUri().getScheme();
        if (targetScheme != null && !"file".equals(targetScheme)) {
            throw new IOException("Unable to create symlink to non-local file "
                    + "system: " + target.toString());
        }
        if (createParent) {
            mkdir(link.getParent(), FsPermission.getDirDefault(), true);
        }
        // NB: Use createSymbolicLink in java.nio.file.Path once available
        try {
            Shell.execCommand(Shell.LINK_COMMAND, "-s",
                    new URI(target.toString()).getPath(),
                    new URI(link.toString()).getPath());
        } catch (URISyntaxException x) {
            throw new IOException("Invalid symlink path: " + x.getMessage());
        } catch (IOException x) {
            throw new IOException("Unable to create symlink: " + x.getMessage());
        }
    }

    /**
     * Returns the target of the given symlink. Returns the empty string if the
     * given path does not refer to a symlink or there is an error acessing the
     * symlink.
     */
    private String readLink(Path p) {
        /*
         * NB: Use readSymbolicLink in java.nio.file.Path once available. Could
         * use getCanonicalPath in File to get the target of the symlink but
         * that does not indicate if the given path refers to a symlink.
         */
        try {
            final String path = p.toUri().getPath();
            return Shell.execCommand(Shell.READ_LINK_COMMAND, path).trim();
        } catch (IOException x) {
            return "";
        }
    }

    /**
     * Return a FileStatus representing the given path. If the path refers to a
     * symlink return a FileStatus representing the link rather than the object
     * the link refers to.
     */
    @Override
    public FileStatus getFileLinkStatus(final Path f) throws IOException {
        String target = readLink(f);
        try {
            FileStatus fs = getFileStatus(f);
            // If f refers to a regular file or directory
            if ("".equals(target)) {
                return fs;
            }
            // Otherwise f refers to a symlink
            return new FileStatus(fs.getLen(), false, fs.getReplication(),
                    fs.getBlockSize(), fs.getModificationTime(),
                    fs.getAccessTime(), fs.getPermission(), fs.getOwner(),
                    fs.getGroup(), new Path(target), f);
        } catch (FileNotFoundException e) {
            /*
             * The exists method in the File class returns false for dangling
             * links so we can get a FileNotFoundException for links that exist.
             * It's also possible that we raced with a delete of the link. Use
             * the readBasicFileAttributes method in java.nio.file.attributes
             * when available.
             */
            if (!"".equals(target)) {
                // dangling link: report a stub status that still names the target
                return new FileStatus(0, false, 0, 0, 0, 0,
                        FsPermission.getDefault(), "", "", new Path(target), f);
            }
            // f refers to a file or directory that does not exist
            throw e;
        }
    }

    @Override
    public Path getLinkTarget(Path f) throws IOException {
        /*
         * We should never get here. Valid local links are resolved
         * transparently by the underlying local file system and accessing a
         * dangling link will result in an IOException, not an
         * UnresolvedLinkException, so FileContext should never call this
         * function.
         */
        throw new AssertionError();
    }

}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterfsVolume.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterfsVolume.java
new file mode 100644
index 00000000..e27e41a4
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterfsVolume.java
@@ -0,0 +1,565 @@
+/**
+ *
+ * Copyright (c) 2013 Red Hat, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * Extends the RawLocalFileSystem to add support for Gluster Volumes.
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.BufferedOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BufferedFSInputStream;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+import org.gluster.fs.GlusterClient;
+import org.gluster.fs.GlusterFile;
+import org.gluster.fs.GlusterVolume;
+import org.gluster.fs.IGlusterInputStream;
+import org.gluster.fs.IGlusterOutputStream;
+
+public class GlusterfsVolume extends FileSystem {
+
+ static final URI NAME = URI.create("glusterfs:///");
+ private Path workingDir;
+ public static final int OVERRIDE_WRITE_BUFFER_SIZE = 1024 * 4;
+ public static final int OPTIMAL_WRITE_BUFFER_SIZE = 1024 * 128;
+
+ public GlusterVolume vol;
+ public GlusterClient client;
+
+ public GlusterfsVolume() {}
+
+ public GlusterfsVolume(Configuration conf) {
+ this.setConf(conf);
+
+ }
+
+ private Path makeAbsolute(Path f){
+ if (f.isAbsolute()) {
+ return f;
+ } else {
+ return new Path(workingDir, f);
+ }
+ }
+
+
+ public URI getUri(){
+ return NAME;
+ }
+
+
+
+ public void setConf(Configuration conf){
+
+ if (conf == null)
+ return;
+
+ super.setConf(conf);
+
+ String volume = conf.get("fs.glusterfs.volume", "gv0");
+ String server = conf.get("fs.glusterfs.server", "localhost");
+
+ client = new GlusterClient(server);
+
+ try {
+ vol = client.connect(volume);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ Path workingDirectory = getInitialWorkingDirectory();
+ try {
+ mkdirs(workingDirectory);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ setWorkingDirectory(workingDirectory);
+ System.out.println("Working directory is : " + getWorkingDirectory());
+
+ /**
+ * Write Buffering
+ */
+ Integer userBufferSize = conf.getInt("io.file.buffer.size", -1);
+ if (userBufferSize == OVERRIDE_WRITE_BUFFER_SIZE || userBufferSize == -1) {
+ conf.setInt("io.file.buffer.size", OPTIMAL_WRITE_BUFFER_SIZE);
+ }
+
+ }
+
+ public String pathOnly(Path path){
+
+ return makeQualified(path).toUri().getPath();
+ }
+
+
+
+ public void initialize(URI uri, Configuration conf) throws IOException{
+ super.initialize(uri, conf);
+ setConf(conf);
+ }
+
+ class TrackingInputStreamWrapper extends InputStream {
+
+ IGlusterInputStream ios = null;
+ long bytesRead = 0;
+
+ public TrackingInputStreamWrapper(IGlusterInputStream ios) throws IOException {
+ this.ios = ios;
+ }
+
+ public IGlusterInputStream getChannel(){
+ return this.ios;
+ }
+
+
+ public int read() throws IOException{
+ int result = ios.read();
+ if (result != -1) {
+ bytesRead += result;
+ statistics.incrementBytesRead(1);
+ }
+ return result;
+ }
+
+
+ public int read(byte[] data) throws IOException{
+ int result = ios.read(data,0,data.length);
+ if (result != -1) {
+ bytesRead += result;
+ statistics.incrementBytesRead(result);
+ }
+ return result;
+ }
+
+
+ public int read(byte[] data, int offset, int length) throws IOException{
+ int result = ios.read(data, offset, length);
+
+ if (result != -1) {
+ bytesRead += result;
+ statistics.incrementBytesRead(result);
+ }
+ return result;
+ }
+ }
+
+ /*******************************************************
+ * For open()'s FSInputStream.
+ *******************************************************/
+ class GlussterFileInputStream extends FSInputStream {
+ private TrackingInputStreamWrapper fis;
+ private long bytesReadThisStream = 0;
+ private String fileName = null;
+ IGlusterInputStream gis;
+
+ public GlussterFileInputStream(Path f) throws IOException {
+ gis = vol.open(pathOnly(f)).bufferedInputStream();
+ fileName = f.toString();
+ this.fis = new TrackingInputStreamWrapper(gis);
+ }
+
+
+ public void seek(long pos) throws IOException{
+ gis.seek(pos);
+ }
+
+
+ public long getPos() throws IOException{
+ return gis.offset();
+ }
+
+
+ public boolean seekToNewSource(long targetPos) throws IOException{
+ seek(targetPos);
+ return true;
+ }
+
+
+ public int available() throws IOException{
+ return fis.available();
+ }
+
+
+ public void close() throws IOException{
+ fis.close();
+ }
+
+
+ public boolean markSupported(){
+ return ((InputStream) gis).markSupported();
+ }
+
+
+ public int read() throws IOException{
+ bytesReadThisStream++;
+ return fis.read();
+ }
+
+
+ public int read(byte[] b, int off, int len) throws IOException{
+ int read = fis.read(b, off, len);
+ bytesReadThisStream += read;
+ return read;
+ }
+
+
+ public int read(long position, byte[] b, int off, int len) throws IOException{
+ seek(position);
+ int read = fis.getChannel().read(b, off, len);
+ return read;
+ }
+
+
+ public long skip(long n) throws IOException{
+ return fis.skip(n);
+ }
+
+ }
+
+
+ public FSDataInputStream open(Path f, int bufferSize) throws IOException{
+ if (!exists(f)) {
+ throw new FileNotFoundException(f.toString());
+ }
+ f = makeQualified(f);
+ return new FSDataInputStream(new BufferedFSInputStream(new GlussterFileInputStream(f), bufferSize));
+ }
+
+ /*********************************************************
+ * For create()'s FSOutputStream.
+ *********************************************************/
+ class GlusterFileOutputStream extends OutputStream {
+ private IGlusterOutputStream fos;
+
+ private GlusterFileOutputStream(Path f, boolean append) throws IOException {
+ GlusterFile file = vol.open(pathOnly(f));
+ try {
+ if (!file.exists()) {
+ file.createNewFile();
+ }
+ } catch (Exception ex) {
+ throw new IOException("Error creating " + f + ":\n" + ex);
+ }
+
+ this.fos = file.outputStream();
+ if (append) {
+ this.fos.position(file.length() - 1);
+ }
+ }
+
+
+ public void close() throws IOException{
+ fos.close();
+ }
+
+
+ public void flush() throws IOException{
+ fos.flush();
+ }
+
+
+ public void write(byte[] b, int off, int len) throws IOException{
+ fos.write(b, off, len);
+ }
+
+
+ public void write(int b) throws IOException{
+ fos.write(b);
+ }
+ }
+
+ public BlockLocation[] getFileBlockLocations(FileStatus file,long start,long len) throws IOException{
+ GlusterFile f = vol.open(pathOnly(file.getPath()));
+ BlockLocation[] result=null;
+ GlusterfsXattr at = new GlusterfsXattr();
+ return at.getPathInfo(f, start, len);
+
+ }
+
+ public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException{
+ f = makeQualified(f);
+ if (!exists(f)) {
+ throw new FileNotFoundException("File " + f + " not found");
+ }
+ if (getFileStatus(f).isDirectory()) {
+ throw new IOException("Cannot append to a diretory (=" + f + " )");
+ }
+ return new FSDataOutputStream(new BufferedOutputStream(new GlusterFileOutputStream(f, true), bufferSize), statistics);
+ }
+
+
+ public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException{
+ f = makeQualified(f);
+ return create(f, overwrite, true, bufferSize, replication, blockSize, progress);
+ }
+
+ private FSDataOutputStream create(Path f, boolean overwrite, boolean createParent, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException{
+ f = makeQualified(f);
+ if (exists(f) && !overwrite) {
+ throw new IOException("File already exists: " + f);
+ }
+
+ Path parent = f.getParent();
+ if (parent != null && !mkdirs(parent)) {
+ throw new IOException("Mkdirs failed to create " + parent.toString());
+ }
+ return new FSDataOutputStream(new BufferedOutputStream(new GlusterFileOutputStream(f, false), bufferSize), statistics);
+ }
+
+
+ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet flags, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException{
+ f = makeQualified(f);
+
+ if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) {
+ throw new IOException("File already exists: " + f);
+ }
+ return new FSDataOutputStream(new BufferedOutputStream(new GlusterFileOutputStream(f, false), bufferSize), statistics);
+ }
+
+
+ public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException{
+ f = makeQualified(f);
+ FSDataOutputStream out = create(f, overwrite, bufferSize, replication, blockSize, progress);
+ setPermission(f, permission);
+ return out;
+ }
+
+
+ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException{
+ f = makeQualified(f);
+ FSDataOutputStream out = create(f, overwrite, false, bufferSize, replication, blockSize, progress);
+ setPermission(f, permission);
+ return out;
+ }
+
+
+ public boolean rename(Path src, Path dst) throws IOException{
+ if (vol.open(pathOnly(src)).renameTo(vol.open(pathOnly(dst)))) {
+ return true;
+ }
+ return FileUtil.copy(this, src, this, dst, true, getConf());
+ }
+
+ /**
+ * Delete the given path to a file or directory.
+ *
+ * @param p
+ * the path to delete
+ * @param recursive
+ * to delete sub-directories
+ * @return true if the file or directory and all its contents were deleted
+ * @throws IOException
+ * if p is non-empty and recursive is false
+ */
+
+ public boolean delete(Path p, boolean recursive) throws IOException{
+ p = makeQualified(p);
+ GlusterFile file = vol.open(pathOnly(p));
+ return file.delete(recursive);
+ }
+
+
+ public FileStatus[] listStatus(Path f) throws IOException{
+ f = makeQualified(f);
+
+ GlusterFile localf = vol.open(pathOnly(f));
+ FileStatus[] results;
+
+ if (!localf.exists()) {
+ throw new FileNotFoundException("File " + f + " does not exist");
+ }
+ if (localf.isFile()) {
+ return new FileStatus[] { new GlusterFileStatus(localf, this) };
+ }
+
+ GlusterFile[] names = localf.listFiles();
+ if (names == null) {
+ return null;
+ }
+ results = new FileStatus[names.length];
+ int j = 0;
+ for (int i = 0; i < names.length; i++) {
+ try {
+ results[j] = getFileStatus(new Path(names[i].getPath()));
+ j++;
+ } catch (FileNotFoundException e) {
+ // ignore the files not found since the dir list may have have
+ // changed
+ // since the names[] list was generated.
+ }
+ }
+ if (j == names.length) {
+ return results;
+ }
+ return Arrays.copyOf(results, j);
+ }
+
+ /**
+ * Creates the specified directory hierarchy. Does not treat existence as an
+ * error.
+ */
+
+
+ public boolean mkdirs(Path f) throws IOException{
+ f = makeQualified(f);
+
+ if (f == null) {
+ throw new IllegalArgumentException("mkdirs path arg is null");
+ }
+ Path parent = f.getParent();
+ GlusterFile p2f = vol.open(pathOnly(f));
+ if (parent != null) {
+ GlusterFile parent2f = vol.open(pathOnly(parent));
+ if (parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
+ throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);
+ }
+ }
+ return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
+ }
+
+
+ public boolean mkdirs(Path f, FsPermission permission) throws IOException{
+ boolean b = mkdirs(f);
+ if (b) {
+ setPermission(f, permission);
+ }
+ return b;
+ }
+
+
+ protected boolean primitiveMkdir(Path f, FsPermission absolutePermission) throws IOException{
+ boolean b = mkdirs(f);
+ setPermission(f, absolutePermission);
+ return b;
+ }
+
+
+ public Path getHomeDirectory(){
+ return this.makeQualified(new Path(System.getProperty("user.home")));
+ }
+
+ /**
+ * Set the working directory to the given directory.
+ */
+
+ public void setWorkingDirectory(Path newDir){
+ workingDir = makeAbsolute(newDir);
+ checkPath(workingDir);
+
+ }
+
+
+ public Path getWorkingDirectory(){
+ return workingDir;
+ }
+
+
+ protected Path getInitialWorkingDirectory(){
+ return new Path(GlusterfsVolume.NAME + "user/" + System.getProperty("user.name"));
+ }
+
+
+ public FsStatus getStatus(Path p) throws IOException{
+ p = makeQualified(p);
+ // assume for now that we're only dealing with one volume.
+ // GlusterFile partition = vol.open(pathOnly(p == null ? new Path("/") :
+ // p));
+ // File provides getUsableSpace() and getFreeSpace()
+ // File provides no API to obtain used space, assume used = total - free
+ return new GlusterFsStatus(vol);
+ }
+
+
+ public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException{
+ return fsOutputFile;
+ }
+
+ // It's in the right place - nothing to do.
+
+ public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile) throws IOException{
+ }
+
+
+ public void close() throws IOException{
+ super.close();
+ }
+
+
+ public String toString(){
+ return "GlusterFs Volume - glide:" + vol.getName();
+ }
+
+
+ public FileStatus getFileStatus(Path f) throws IOException{
+ f = makeQualified(f);
+
+ GlusterFile file = vol.open(pathOnly(f));
+
+ if (file.exists()) {
+ return new GlusterFileStatus(vol.open(pathOnly(f)), this);
+ } else {
+ throw new FileNotFoundException("File " + f + " does not exist.");
+ }
+ }
+
+
+ public void setOwner(Path p, String username, String groupname) throws IOException{
+ p = makeQualified(p);
+ if (username == null && groupname == null) {
+ throw new IOException("username == null && groupname == null");
+ }
+ GlusterFile gf = vol.open(pathOnly(p));
+ long gid = -1;
+ long uid = -1;
+
+ if (username == null) {
+ uid = gf.getUid();
+ }
+
+ if (groupname == null) {
+ gid = gf.getGid();
+ }
+
+ }
+
+
+ public void setPermission(Path p, FsPermission permission) throws IOException{
+ p = makeQualified(p);
+ GlusterFile gf = vol.open(pathOnly(p));
+ gf.chmod(permission.toShort());
+ }
+
+}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterfsXattr.java b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterfsXattr.java
new file mode 100644
index 00000000..6faca544
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/GlusterfsXattr.java
@@ -0,0 +1,452 @@
+/**
+ *
+ * Copyright (c) 2011 Gluster, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.libgfsio.GlusterFSBrickClass;
+import org.apache.hadoop.fs.libgfsio.GlusterFSBrickRepl;
+import org.gluster.fs.GlusterFile;
+
+public class GlusterfsXattr {
+
+ public enum LAYOUT {
+ D, S, R, DS, DR, SR, DSR
+ }
+
+ public enum CMD {
+ GET_HINTS, GET_REPLICATION, GET_BLOCK_SIZE, CHECK_FOR_QUICK_IO
+ }
+
+ private static String hostname;
+
+ private static final String XATTR_PROP = "trusted.glusterfs.pathinfo";
+
+
+ public GlusterfsXattr() {}
+
+ public String brick2host(String brick) throws IOException{
+ String[] hf = null;
+
+ hf = brick.split(":");
+ if (hf.length != 2) {
+ System.out.println("brick not of format hostname:path");
+ throw new IOException("Error getting hostname from brick");
+ }
+
+ return hf[0];
+ }
+
+ public String brick2file(String brick) throws IOException{
+ String[] hf = null;
+
+ hf = brick.split(":");
+ if (hf.length != 2) {
+ System.out.println("brick not of format hostname:path");
+ throw new IOException("Error getting hostname from brick");
+ }
+
+ return hf[1];
+ }
+
+ public BlockLocation[] getPathInfo(GlusterFile filename, long start, long len) throws IOException{
+ HashMap> vol = null;
+ HashMap meta = new HashMap();
+
+ vol = execGetFattr(filename, meta, CMD.GET_HINTS);
+
+ return getHints(vol, meta, start, len, null);
+ }
+
+ public long getBlockSize(GlusterFile filename) throws IOException{
+ HashMap> vol = null;
+ HashMap meta = new HashMap();
+
+ vol = execGetFattr(filename, meta, CMD.GET_BLOCK_SIZE);
+
+ if (!meta.containsKey("block-size"))
+ return 0;
+
+ return meta.get("block-size");
+
+ }
+
+ public short getReplication(GlusterFile filename) throws IOException{
+ HashMap> vol = null;
+ HashMap meta = new HashMap();
+
+ vol = execGetFattr(filename, meta, CMD.GET_REPLICATION);
+
+ return (short) getReplicationFromLayout(vol, meta);
+
+ }
+
+ public TreeMap quickIOPossible(GlusterFile filename, long start, long len) throws IOException{
+ String realpath = null;
+ HashMap> vol = null;
+ HashMap meta = new HashMap();
+ TreeMap hnts = new TreeMap();
+
+ vol = execGetFattr(filename, meta, CMD.GET_HINTS);
+ getHints(vol, meta, start, len, hnts);
+
+ if (hnts.size() == 0)
+ return null; // BOOM !!
+
+ // DEBUG - dump hnts here
+ return hnts;
+ }
+
+ public HashMap> execGetFattr(GlusterFile filename, HashMap meta, CMD cmd) throws IOException{
+ Process p = null;
+ BufferedReader brInput = null;
+ String s = null;
+ String cmdOut = null;
+ String getfattrCmd = null;
+ String xlator = null;
+ String enclosingXl = null;
+ String enclosingXlVol = null;
+ String key = null;
+ String layout = "";
+ int rcount = 0;
+ int scount = 0;
+ int dcount = 0;
+ int count = 0;
+
+ HashMap> vol = new HashMap>();
+
+ cmdOut = filename.getXAttr(GlusterfsXattr.XATTR_PROP);
+
+ /**
+ * TODO: Use a single regex for extracting posix paths as well as xlator
+ * counts for layout matching.
+ */
+
+ Pattern pattern = Pattern.compile("<(.*?)[:\\(](.*?)>");
+ Matcher matcher = pattern.matcher(cmdOut);
+
+ Pattern p_px = Pattern.compile(".*?:(.*)");
+ Matcher m_px;
+ String gibberish_path;
+
+ s = null;
+ while (matcher.find()) {
+ xlator = matcher.group(1);
+ if (xlator.equalsIgnoreCase("posix")) {
+ if (enclosingXl.equalsIgnoreCase("replicate"))
+ count = rcount;
+ else if (enclosingXl.equalsIgnoreCase("stripe"))
+ count = scount;
+ else if (enclosingXl.equalsIgnoreCase("distribute"))
+ count = dcount;
+ else
+ throw new IOException("Unknown Translator: " + enclosingXl);
+
+ key = enclosingXl + "-" + count;
+
+ if (vol.get(key) == null)
+ vol.put(key, new ArrayList());
+
+ gibberish_path = matcher.group(2);
+
+ /* extract posix path from the gibberish string */
+ m_px = p_px.matcher(gibberish_path);
+ if (!m_px.find())
+ throw new IOException("Cannot extract posix path");
+
+ vol.get(key).add(m_px.group(1));
+ continue;
+ }
+
+ enclosingXl = xlator;
+ enclosingXlVol = matcher.group(2);
+
+ if (xlator.equalsIgnoreCase("replicate"))
+ if (rcount++ != 0)
+ continue;
+
+ if (xlator.equalsIgnoreCase("stripe")) {
+ if (scount++ != 0)
+ continue;
+
+ Pattern ps = Pattern.compile("\\[(\\d+)\\]");
+ Matcher ms = ps.matcher(enclosingXlVol);
+
+ if (ms.find()) {
+ if (((cmd == CMD.GET_BLOCK_SIZE) || (cmd == CMD.GET_HINTS)) && (meta != null))
+ meta.put("block-size", Integer.parseInt(ms.group(1)));
+ } else
+ throw new IOException("Cannot get stripe size");
+ }
+
+ if (xlator.equalsIgnoreCase("distribute"))
+ if (dcount++ != 0)
+ continue;
+
+ layout += xlator.substring(0, 1);
+ }
+
+ if ((dcount == 0) && (scount == 0) && (rcount == 0))
+ throw new IOException("Cannot get layout");
+
+ if (meta != null) {
+ meta.put("dcount", dcount);
+ meta.put("scount", scount);
+ meta.put("rcount", rcount);
+ }
+
+ vol.put("layout", new ArrayList(1));
+ vol.get("layout").add(layout);
+
+ return vol;
+ }
+
+ BlockLocation[] getHints(HashMap> vol, HashMap meta, long start, long len, TreeMap hnts) throws IOException{
+ String brick = null;
+ String key = null;
+ boolean done = false;
+ int i = 0;
+ int counter = 0;
+ int stripeSize = 0;
+ long stripeStart = 0;
+ long stripeEnd = 0;
+ int nrAllocs = 0;
+ int allocCtr = 0;
+ BlockLocation[] result = null;
+ ArrayList brickList = null;
+ ArrayList stripedBricks = null;
+ Iterator it = null;
+
+ String[] blks = null;
+ GlusterFSBrickRepl[] repl = null;
+ int dcount, scount, rcount;
+
+ LAYOUT l = LAYOUT.valueOf(vol.get("layout").get(0));
+ dcount = meta.get("dcount");
+ scount = meta.get("scount");
+ rcount = meta.get("rcount");
+
+ switch (l) {
+ case D:
+ key = "DISTRIBUTE-" + dcount;
+ brick = vol.get(key).get(0);
+
+ if (hnts == null) {
+ result = new BlockLocation[1];
+ result[0] = new BlockLocation(null, new String[] { brick2host(brick) }, start, len);
+ } else
+ hnts.put(0, new GlusterFSBrickClass(brick, start, len, false, -1, -1, -1));
+ break;
+
+ case R:
+ case DR:
+ /* just the name says it's striped - the volume isn't */
+ stripedBricks = new ArrayList();
+
+ for (i = 1; i <= rcount; i++) {
+ key = "REPLICATE-" + i;
+ brickList = vol.get(key);
+ it = brickList.iterator();
+ while (it.hasNext()) {
+ stripedBricks.add(it.next());
+ }
+ }
+
+ nrAllocs = stripedBricks.size();
+ if (hnts == null) {
+ result = new BlockLocation[1];
+ blks = new String[nrAllocs];
+ }
+
+ for (i = 0; i < nrAllocs; i++) {
+ if (hnts == null)
+ blks[i] = brick2host(stripedBricks.get(i));
+ else
+ hnts.put(i, new GlusterFSBrickClass(stripedBricks.get(i), start, len, false, -1, -1, -1));
+ }
+
+ if (hnts == null)
+ result[0] = new BlockLocation(null, blks, start, len);
+
+ break;
+
+ case SR:
+ case DSR:
+ int rsize = 0;
+ ArrayList> replicas = new ArrayList>();
+
+ stripedBricks = new ArrayList();
+
+ if (rcount == 0)
+ throw new IOException("got replicated volume with replication count 0");
+
+ for (i = 1; i <= rcount; i++) {
+ key = "REPLICATE-" + i;
+ brickList = vol.get(key);
+ it = brickList.iterator();
+ replicas.add(i - 1, new ArrayList());
+ while (it.hasNext()) {
+ replicas.get(i - 1).add(it.next());
+ }
+ }
+
+ stripeSize = meta.get("block-size");
+
+ nrAllocs = (int) (((len - start) / stripeSize) + 1);
+ if (hnts == null) {
+ result = new BlockLocation[nrAllocs];
+ repl = new GlusterFSBrickRepl[nrAllocs];
+ }
+
+ // starting stripe position
+ counter = (int) ((start / stripeSize) % rcount);
+ stripeStart = start;
+
+ key = null;
+ int currAlloc = 0;
+ boolean hntsDone = false;
+ while ((stripeStart < len) && !done) {
+ stripeEnd = (stripeStart - (stripeStart % stripeSize)) + stripeSize - 1;
+ if (stripeEnd > start + len) {
+ stripeEnd = start + len - 1;
+ done = true;
+ }
+
+ rsize = replicas.get(counter).size();
+
+ if (hnts == null)
+ repl[allocCtr] = new GlusterFSBrickRepl(rsize, stripeStart, (stripeEnd - stripeStart));
+
+ for (i = 0; i < rsize; i++) {
+ brick = replicas.get(counter).get(i);
+ currAlloc = (allocCtr * rsize) + i;
+
+ if (hnts == null)
+ repl[allocCtr].addHost(brick2host(brick));
+ else if (currAlloc <= (rsize * rcount) - 1) {
+ hnts.put(currAlloc, new GlusterFSBrickClass(brick, stripeStart, (stripeEnd - stripeStart), true, stripeSize, rcount, rsize));
+ } else
+ hntsDone = true;
+ }
+
+ if (hntsDone)
+ break;
+
+ stripeStart = stripeEnd + 1;
+
+ allocCtr++;
+ counter++;
+
+ if (counter >= replicas.size())
+ counter = 0;
+ }
+
+ if (hnts == null)
+ for (int k = 0; k < nrAllocs; k++)
+ result[k] = new BlockLocation(null, repl[k].getReplHosts(), repl[k].getStartLen(), repl[k].getOffLen());
+
+ break;
+
+ case S:
+ case DS:
+ if (scount == 0)
+ throw new IOException("got striped volume with stripe count 0");
+
+ stripedBricks = new ArrayList();
+ stripeSize = meta.get("block-size");
+
+ key = "STRIPE-" + scount;
+ brickList = vol.get(key);
+ it = brickList.iterator();
+ while (it.hasNext()) {
+ stripedBricks.add(it.next());
+ }
+
+ nrAllocs = (int) ((len - start) / stripeSize) + 1;
+ if (hnts == null)
+ result = new BlockLocation[nrAllocs];
+
+ // starting stripe position
+ counter = (int) ((start / stripeSize) % stripedBricks.size());
+ stripeStart = start;
+
+ key = null;
+ while ((stripeStart < len) && !done) {
+ brick = stripedBricks.get(counter);
+
+ stripeEnd = (stripeStart - (stripeStart % stripeSize)) + stripeSize - 1;
+ if (stripeEnd > start + len) {
+ stripeEnd = start + len - 1;
+ done = true;
+ }
+
+ if (hnts == null)
+ result[allocCtr] = new BlockLocation(null, new String[] { brick2host(brick) }, stripeStart, (stripeEnd - stripeStart));
+ else if (allocCtr <= stripedBricks.size()) {
+ hnts.put(allocCtr, new GlusterFSBrickClass(brick, stripeStart, (stripeEnd - stripeStart), true, stripeSize, stripedBricks.size(), -1));
+ } else
+ break;
+
+ stripeStart = stripeEnd + 1;
+
+ counter++;
+ allocCtr++;
+
+ if (counter >= stripedBricks.size())
+ counter = 0;
+ }
+
+ break;
+ }
+
+ return result;
+ }
+
+ /* TODO: use meta{dcount,scount,rcount} for checking */
+ public int getReplicationFromLayout(HashMap> vol, HashMap meta) throws IOException{
+ int replication = 0;
+ LAYOUT l = LAYOUT.valueOf(vol.get("layout").get(0));
+
+ switch (l) {
+ case D:
+ case S:
+ case DS:
+ replication = 1;
+ break;
+
+ case R:
+ case DR:
+ case SR:
+ case DSR:
+ final String key = "REPLICATION-1";
+ replication = vol.get(key).size();
+ }
+
+ return replication;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/IdLookup.java b/src/main/java/org/apache/hadoop/fs/libgfsio/IdLookup.java
new file mode 100644
index 00000000..ebdb573f
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/IdLookup.java
@@ -0,0 +1,97 @@
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+import java.io.InputStream;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class IdLookup {
+
+ public static final int UID = 1;
+ public static final int GID = 2;
+
+ public static int getUid(String name) throws IOException{
+ return getId(name, UID);
+ }
+
+ public static int getGid(String name) throws IOException{
+ return getId(name, GID);
+ }
+
+ protected static int getId(String name, int type){
+
+ String userName = System.getProperty("user.name");
+ String arg = null;
+
+ switch(type){
+ case IdLookup.UID :
+ arg = "-u";
+ break;
+
+ case IdLookup.GID :
+ arg = "-g";
+ break;
+ }
+
+ String command = "id "+ arg + " " + userName;
+ Process child = null;
+ try {
+ child = Runtime.getRuntime().exec(command);
+ } catch (IOException e) {
+
+ e.printStackTrace();
+ }
+
+ // Get the input stream and read from it
+ InputStream in = child.getInputStream();
+ String output = new String();
+ int c;
+ try{
+ while ((c = in.read()) != -1) {
+ output+=c;
+ }
+ in.close();
+ }catch(IOException ex){
+
+ }
+ return Integer.parseInt(output);
+
+ }
+
+ public static String getName(int id){
+ String command = "getent passwd";
+ Process child=null;
+ String s=null;
+ try {
+ child = Runtime.getRuntime().exec(command);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ BufferedReader brInput=new BufferedReader(new InputStreamReader(child.getInputStream()));
+
+ String cmdOut="";
+ try {
+ while ((s=brInput.readLine())!=null)
+ cmdOut+=s;
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ String pattern = "^(\\w*)\\:\\w*\\:" + id+":";
+ Matcher results = Pattern.compile(pattern).matcher(cmdOut);
+ String name = null;
+ try{
+ results.find();
+ name = results.group(1);
+ }catch(IllegalStateException ex){
+ // user not found
+ }
+
+ return name;
+
+ }
+
+}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/Util.java b/src/main/java/org/apache/hadoop/fs/libgfsio/Util.java
new file mode 100644
index 00000000..23c2f1a8
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/Util.java
@@ -0,0 +1,53 @@
+/**
+ *
+ * Copyright (c) 2013 Red Hat, Inc.
+ * This file is part of GlusterFS.
+ *
+ * Licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * Implements the Hadoop FileSystem Interface to allow applications to store
+ * files on GlusterFS and run Map/Reduce jobs on the data.
+ *
+ *
+ */
+
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.util.Shell;
+
+public class Util{
+
+ public static String execCommand(File f,String...cmd) throws IOException{
+ String[] args=new String[cmd.length+1];
+ System.arraycopy(cmd, 0, args, 0, cmd.length);
+ args[cmd.length]=FileUtil.makeShellPath(f, true);
+ String output=Shell.execCommand(args);
+ return output;
+ }
+
+ /* copied from unstalbe hadoop API org.apache.hadoop.Shell */
+ public static String[] getGET_PERMISSION_COMMAND(){
+ // force /bin/ls, except on windows.
+ return new String[]{(WINDOWS ? "ls" : "/bin/ls"),"-ld"};
+ }
+
+ /* copied from unstalbe hadoop API org.apache.hadoop.Shell */
+
+ public static final boolean WINDOWS /* borrowed from Path.WINDOWS */
+ =System.getProperty("os.name").startsWith("Windows");
+ // / loads permissions, owner, and group from `ls -ld`
+}
diff --git a/src/main/java/org/apache/hadoop/fs/libgfsio/Version.java b/src/main/java/org/apache/hadoop/fs/libgfsio/Version.java
new file mode 100644
index 00000000..46a8f86d
--- /dev/null
+++ b/src/main/java/org/apache/hadoop/fs/libgfsio/Version.java
@@ -0,0 +1,51 @@
+package org.apache.hadoop.fs.libgfsio;
+
+import java.io.IOException;
+
+import java.util.Properties;
+
/**
 * Versioning stuff for the shim. Loads build-time metadata from the
 * "git.properties" resource baked into the jar; construction fails fast when
 * the resource is missing (e.g. when not building from a release binary).
 * This class is not tested since there is no deterministic behaviour
 * and the effects are pure side effects.
 */
public class Version extends Properties{
    public Version() {
        super();
        try{
            load(this.getClass().getClassLoader().getResourceAsStream("git.properties"));
        }
        catch(Throwable t){
            // preserve the original failure as the cause instead of
            // flattening it down to just its message text
            throw new RuntimeException("Couldn't find git properties for version info " + t.getMessage(), t);
        }
    }

    /** Release tag: the part of git.commit.id.describe before the first '-'. */
    public String getTag(){
        // NOTE(review): NPEs if git.commit.id.describe is absent - the ctor
        // guarantees the resource loaded, not that this key exists.
        return this.getProperty("git.commit.id.describe").split("-")[0];
    }

    /**
     * For use with terminal version checking.

     Example, run with an argument to get single property:
     java -cp /home/Development/hadoop-glusterfs/glusterfs-2.0-SNAPSHOT.jar \
     org.apache.hadoop.fs.glusterfs.Version git.commit.id.describe | cut -d'-' -f 1

     Or just run (no args, prints all properties)
     java -cp /home/Development/hadoop-glusterfs/glusterfs-2.0-SNAPSHOT.jar \
     */
    public static void main(String[] args){
        Version v = new Version();
        if(args.length==0){
            // dump the whole version info if no arg
            System.out.println(v);
        }
        else{
            // FIX: the old code appended "" before the null check, so the
            // check could never fail and missing keys printed "null".
            Object prop = v.get(args[0]);
            System.out.println(
                prop!=null?
                    prop.toString()
                    :"Couldnt find property "+args[0]);
        }
    }
}
\ No newline at end of file