diff --git a/20_glusterfs_hadoop_sudoers b/20_glusterfs_hadoop_sudoers new file mode 100644 index 00000000..a1f51df6 --- /dev/null +++ b/20_glusterfs_hadoop_sudoers @@ -0,0 +1,3 @@ +# GlusterFS sudo settings for multi-tenancy +Defaults:%hadoop !requiretty +%hadoop ALL=NOPASSWD:/usr/bin/getfattr -m . -n trusted.glusterfs.pathinfo * diff --git a/README b/README index 3c5224a5..a3860804 100644 --- a/README +++ b/README @@ -139,14 +139,11 @@ FOR HACKERS * Source Layout (./src/) -org.apache.hadoop.fs.glusters/GlusterFSBrickClass.java -org.apache.hadoop.fs.glusters/GlusterFSXattr.java <--- Fetch/Parse Extended Attributes of a file -org.apache.hadoop.fs.glusters/GlusterFUSEInputStream.java <--- Input Stream (instantiated during open() calls; -org.apache.hadoop.fs.glusters/GlusterFSBrickRepl.java -org.apache.hadoop.fs.glusters/GlusterFUSEOutputStream.java <--- Output Stream (instantiated during creat() calls) -org.apache.hadoop.fs.glusters/GlusterFileSystem.java <--- Entry Point for the plugin (extends Hadoop FileSystem class) +For the overall architecture, see the link below. Currently, we use the hadoop RawLocalFileSystem as +the basis - and wrap it with the GlusterVolume class. That class is then used by the +Hadoop 1x (GlusterFileSystem) and Hadoop 2x (GlusterFs) adapters. -org.gluster.test.AppTest.java <--- Your test cases go here (if any :-)) + https://forge.gluster.org/hadoop/pages/Architecture ./tools/build-deploy-jar.py <--- Build and Deployment Script ./conf/core-site.xml <--- Sample configuration file @@ -160,9 +157,6 @@ org.gluster.test.AppTest.java <--- Your test cases go here (if JENKINS ------- - At the moment, you need to run as root - this can be done by modifying this line in the jenkins init.d/ script. - This is because of the mount command issued in the GlusterFileSystem. 
- #Method 1) Modify JENKINS_USER in /etc/sysconfig/jenkins JENKINS_USER=root @@ -181,8 +175,43 @@ The unit tests read test resources from glusterconfig.properties - a file which 1) edit your .bashrc, or else at your terminal run : export GLUSTER_MOUNT=/mnt/glusterfs -export HCFS_FILE_SYSTEM_CONNECTOR=org.apache.hadoop.hcfs.test.connector.glusterfs.GlusterFileSystemTestConnector +export HCFS_FILE_SYSTEM_CONNECTOR=org.apache.hadoop.fs.test.connector.glusterfs.GlusterFileSystemTestConnector export HCFS_CLASSNAME=org.apache.hadoop.fs.glusterfs.GlusterFileSystem +(in eclipse - see below, you will add these at the "Run Configurations" menu, +in VM arguments, prefixed with -D, for example, "-DGLUSTER_MOUNT=x -DHCFS_FILE_SYSTEM_CONNECTOR=y ...") + 2) run: - mvn package + mvn clean package + +3) The jar artifact will be in target/ + +DEVELOPING +---------- + +0) Create a mock gluster mount: + + #Create raw disk and format it... + truncate -s 1G /export/debugging_fun.brick + sudo mkfs.xfs /export/debugging_fun.brick + + #Mount it as loopback fs + mount -o loop /export/debugging_fun.brick /mnt/mybrick ; + + #Now make a mount point for it, and also, for gluster itself + mkdir /mnt/mybrick/glusterbrick + mkdir /mnt/glusterfs + MNT="/mnt/glusterfs" + BRICK="/mnt/mybrick/glusterbrick" + + #Create a gluster volume that writes to the brick + sudo gluster volume create HadoopVol 10.10.61.230:$BRICK + + #Mount the volume on top of the newly created brick + mount -t glusterfs $(hostname):HadoopVol $MNT + +1) Run "mvn eclipse:eclipse", and import into eclipse. + +2) Add the exported env variables above via Run Configurations as described in the above section. + +3) Develop and run unit tests as you would any other java app. 
\ No newline at end of file diff --git a/glusterfs-hadoop.spec b/glusterfs-hadoop.spec deleted file mode 100644 index a6196956..00000000 --- a/glusterfs-hadoop.spec +++ /dev/null @@ -1,60 +0,0 @@ -%define _libdir /usr/local/lib - -%define __mkdir_p mkdir -p -%define __rm rm -%define __cp cp - -# Plugin supported Hadoop Version -%define _hadoop_ver 0.20.2 -%define _plugin_ver 0.1 - -%define _gluster_core_version 3.3 -%define _gluster_core_release 0 - -# default install prefix -Prefix: %{_libdir} - -Summary: GlusterFS Hadoop Plugin -Name: glusterfs -Version: %{_hadoop_ver} -Release: %{_plugin_ver} -Group: Application/File -Vendor: Gluster Inc. -Packager: gluster-users@gluster.org -License: Apache - -%description -This plugin provides a transparent layer for Hadoop to run Map/Reduce -jobs on files/data present of GlusterFS. - -%package hadoop -Summary: Hadoop Plugin for GlusterFS -Group: Application/File -Requires: glusterfs-core >= %{_gluster_core_version}-%{_gluster_core_release} - -%description hadoop -This plugin provides a transparent layer for Hadoop to run Map/Reduce -jobs on files/data present of GlusterFS. - -%install -%{__rm} -rf %{buildroot} -%{__mkdir_p} %{buildroot}/%{_libdir}/conf -%{__cp} %{_libdir}/glusterfs-%{_hadoop_ver}-%{_plugin_ver}.jar %{buildroot}/%{_libdir} -%{__cp} -R %{_libdir}/conf/* %{buildroot}/%{_libdir}/conf/ - -%files hadoop -%defattr(-,root,root) -%{_libdir}/glusterfs-%{_hadoop_ver}-%{_plugin_ver}.jar -%{_libdir}/conf/core-site.xml - -%clean -%{__rm} -f %{_libdir}/glusterfs-%{_hadoop_ver}-%{_plugin_ver}.jar -%{__rm} -rf %{_libdir}/conf - -%post hadoop -echo "" -echo "=====================================================================" -echo "Plugin Files installed in the installed prefix." -echo "Create soft links from hadoop lib/ and conf/ directory to these files." 
-echo "=====================================================================" -echo "" \ No newline at end of file diff --git a/glusterfs-hadoop.spec.tmpl b/glusterfs-hadoop.spec.tmpl index cc198990..98428fa6 100644 --- a/glusterfs-hadoop.spec.tmpl +++ b/glusterfs-hadoop.spec.tmpl @@ -1,5 +1,6 @@ Name: rhs-hadoop Version: $version +# release number is automatically updated when source version is the same Release: $release #if $epoch Epoch: $epoch @@ -32,6 +33,7 @@ in the hadoop configuration files and loaded at runtime as the FileSystem implem rm -rf %{buildroot} /bin/mkdir -p %{buildroot}%{_javadir} /bin/mkdir -p %{buildroot}%{hadoop_libdir} +/bin/mkdir -p %{buildroot}%{_sysconfdir}/sudoers.d #for $i, $artifact in $enumerate($all_artifacts) #if $artifact.endswith('.jar') @@ -40,6 +42,9 @@ rm -rf %{buildroot} #end if #end for +# install sudoers file into /etc/sudoers.d/ +install -m 644 20_glusterfs_hadoop_sudoers %{buildroot}%{_sysconfdir}/sudoers.d/ + %clean rm -rf %{buildroot} @@ -51,9 +56,12 @@ rm -rf %{buildroot} %{hadoop_libdir}/$artifact #end if #end for +%{_sysconfdir}/sudoers.d/20_glusterfs_hadoop_sudoers %changelog -* Wed Jan 9 2014 Jay Vyas 2.1.4 renamed +* Wed Feb 05 2014 Jeff Vance 2.1.5-2 +- installs the sudoers file. BZ 1059986 +* Wed Jan 9 2014 Jay Vyas 2.1.4-1 - rename to rhs-hadoop for release * Fri Nov 22 2013 Jay Vyas 2.1.4 diff --git a/pom.xml b/pom.xml index 805b6478..2b9335c2 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ glusterfs jar 2.1.5 - glusterfs + glusterfs-hadoop http://maven.apache.org