Skip to content

Commit d323b40

Browse files
committed
update pom to load conf
1 parent 47998bf commit d323b40

File tree

12 files changed

+454
-1494
lines changed

12 files changed

+454
-1494
lines changed

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
target/
2-
.settings
32
.project
43
.classpath
4+
.settings/

pom.xml

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
<url>http://maven.apache.org</url>
1010
<dependencies>
1111
<dependency>
12-
<groupId>junit</groupId>
12+
<groupId>junit</groupId>
1313
<artifactId>junit</artifactId>
1414
<version>4.4</version>
1515
</dependency>
@@ -20,7 +20,33 @@
2020
</dependency>
2121
</dependencies>
2222
<build>
23+
24+
<!-- testResources not necessary for now, since
25+
we can easily access conf/core-site.xml by using its absolute
26+
root path. "/conf/core-site.xml"
27+
<testResources>
28+
<testResource>
29+
<directory>${project.basedir}/conf/</directory>
30+
</testResource>
31+
</testResources>
32+
-->
33+
2334
<plugins>
35+
<plugin>
36+
<groupId>org.apache.maven.plugins</groupId>
37+
<artifactId>maven-surefire-plugin</artifactId>
38+
</plugin>
39+
40+
<plugin>
41+
<groupId>org.apache.maven.plugins</groupId>
42+
<artifactId>maven-compiler-plugin</artifactId>
43+
<version>2.3.2</version>
44+
<configuration>
45+
<source>1.5</source>
46+
<target>1.5</target>
47+
</configuration>
48+
</plugin>
49+
2450
<plugin>
2551
<groupId>org.apache.maven.plugins</groupId>
2652
<artifactId>maven-compiler-plugin</artifactId>
@@ -30,6 +56,8 @@
3056
<target>1.5</target>
3157
</configuration>
3258
</plugin>
59+
60+
3361
</plugins>
3462
</build>
3563
</project>

resources/setup.test.env.sh

Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,153 @@
1+
#* Copyright (c) 2011 Gluster, Inc. <http://www.gluster.com>
2+
# This file is part of GlusterFS.
3+
#
4+
# Licensed under the Apache License, Version 2.0
5+
# (the "License"); you may not use this file except in compliance with
6+
# the License. You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13+
# implied. See the License for the specific language governing
14+
# permissions and limitations under the License.
15+
#
16+
# This script creates a gluster volume using block files mounted on loopback as bricks.
17+
# The purpose is to allow scripted creation and deletion of gluster volumes without needing to alter
18+
# disk partition or volume structure.
19+
20+
# Print usage information for this script to stdout.
print_help() {
  cat <<EOF
Usage: $0 [OPTION]...

Options:
  -w, --work <directory>          working directory for gluster block device bricks
  -v, --volume <gluster volume>   gluster volume to create
  -h, --help                      show this message

Note: bricks and gluster volume will not persist on reboot. Please edit fstab manually if you wish.
EOF
}
30+
31+
# parse options
while [ "${1+isset}" ]; do
  case "$1" in
    -v|--volume)
      GLUSTER_VOLUME=$2
      shift 2
      ;;
    -w|--work)
      WORK_DIR=$2
      shift 2
      ;;
    -h|--help)
      print_help
      exit
      ;;
    *)
      echo "Error: Unknown option: $1" >&2
      exit 1
      ;;
  esac
done

# Quote the expansion: unquoted [ -z ${VAR} ] collapses to [ -z ] when the
# variable is empty (true only by accident) and breaks outright if the
# value contains whitespace.
if [ -z "${GLUSTER_VOLUME}" ]
then
  echo "I am error. No volume specified." >&2
  echo ""
  print_help
  exit 1
fi

if [ -z "${WORK_DIR}" ]
then
  echo "I am error. No temp directory set." >&2
  echo ""
  print_help
  exit 1
fi

# Number of loopback bricks to build. NOTE(review): the loops below use an
# inclusive bound (i <= NUMBER_OF_BRICKS), so NUMBER_OF_BRICKS+1 bricks are
# actually created — confirm whether that is intentional.
NUMBER_OF_BRICKS=3

# Work area layout: rpm/ (unused for now), bricks/ (mount points),
# blocks/ (backing block files for the loop devices).
mkdir -p "${WORK_DIR}/rpm"
mkdir -p "${WORK_DIR}/bricks"
mkdir -p "${WORK_DIR}/blocks"

# Grab and extract the RPM
#cd ${WORK_DIR}/rpm
#wget ${GLUSTER_URL}
#rpm2cpio *.rpm | cpio -idmv

#create some loopback bricks
83+
84+
# Create a gluster volume backed by loop devices.
# Globals (read):    WORK_DIR, GLUSTER_VOLUME, NUMBER_OF_BRICKS
# Globals (written): LOOP_BRICKS - loop devices allocated here, read later
#                    by createCleanupScript to detach them.
# Requires root privileges (losetup/mkfs/mount) and the gluster CLI.
createGlusterVolume(){
  LOOP_BRICKS=( )

  # TODO: Need to run a force on this command so it doesn't prompt
  GLUSTER_VOLUME_CMD="gluster volume create ${GLUSTER_VOLUME} "
  HOSTNAME=$(hostname)

  # NOTE(review): inclusive bound creates NUMBER_OF_BRICKS+1 bricks;
  # createCleanupScript uses the same bound, so the two stay in step.
  for (( i = 0 ; i <= NUMBER_OF_BRICKS; i++ ))
  do
    # Back each brick with an empty ~30MB block file served over loopback.
    BLOCK="${WORK_DIR}/blocks/block${i}"
    BRICK="${WORK_DIR}/bricks/brick${i}"

    dd if=/dev/zero of="${BLOCK}" bs=1024 count=30720

    # find a free loopback device
    LB_BRICK=$(losetup -f)
    LOOP_BRICKS[${i}]="${LB_BRICK}"

    echo "creating loopback file system on loopback: ${LB_BRICK} block: ${BLOCK}"
    losetup "${LB_BRICK}" "${BLOCK}"
    echo "Making loopback block brick on ${LB_BRICK}"
    # mkfs.xfs -m 1 -v ${BLOCK}
    mkfs -t ext4 -m 1 -v "${LB_BRICK}"

    mkdir -p "${BRICK}"
    mount -t ext4 "${LB_BRICK}" "${BRICK}"
    GLUSTER_VOLUME_CMD="${GLUSTER_VOLUME_CMD} ${HOSTNAME}:${BRICK}"
  done

  # Run the gluster command to create the volume, then bring it online.
  # GLUSTER_VOLUME_CMD is intentionally unquoted: it is a whitespace-built
  # command line that must word-split into command + arguments.
  echo "running: ${GLUSTER_VOLUME_CMD}"
  $GLUSTER_VOLUME_CMD
  gluster volume start "${GLUSTER_VOLUME}"
}
125+
126+
createCleanupScript(){
127+
# clean up after ourselfs
128+
# create an unmount script for the bricks
129+
echo "#!/bin/sh" > ${WORK_DIR}/cleanup.sh
130+
chmod +x ${WORK_DIR}/cleanup.sh
131+
132+
echo "gluster volume stop ${GLUSTER_VOLUME}" >> ${WORK_DIR}/cleanup.sh
133+
echo "gluster volume delete ${GLUSTER_VOLUME}" >> ${WORK_DIR}/cleanup.sh
134+
# Unmount the bricks and loopback devices
135+
136+
for (( i = 0 ; i <= ${NUMBER_OF_BRICKS}; i++ ))
137+
do
138+
# Create an empty block device to use as loop back filesystem
139+
BLOCK=${WORK_DIR}/blocks/block${i}
140+
BRICK=${WORK_DIR}/bricks/brick${i}
141+
echo "umount $BRICK" >> ${WORK_DIR}/cleanup.sh
142+
echo "losetup -d ${LOOP_BRICKS[$i]}" >> ${WORK_DIR}/cleanup.sh
143+
echo "rm -rf $BLOCK" >> ${WORK_DIR}/cleanup.sh
144+
echo "rm -rf $BRICK" >> ${WORK_DIR}/cleanup.sh
145+
146+
done
147+
148+
echo "rm -rf ${WORK_DIR}" >> ${WORK_DIR}/cleanup.sh
149+
}
150+
151+
# Entry point: build the volume, then emit the matching teardown script.
createGlusterVolume
createCleanupScript
printf '%s\n' "Cleanup script: ${WORK_DIR}/cleanup.sh"

src/main/org/apache/hadoop/fs/glusterfs/GlusterFSBrickClass.java

Lines changed: 0 additions & 109 deletions
This file was deleted.

src/main/org/apache/hadoop/fs/glusterfs/GlusterFSBrickRepl.java

Lines changed: 0 additions & 52 deletions
This file was deleted.

0 commit comments

Comments
 (0)