
Commit 4143a3b

Add glusterfs sample configuration
This adds a sample configuration for creating a GlusterFS volume that is replicated across 3 ADs.
- Add the networking resources required by the GlusterFS servers and clients
- Rename/introduce some variables to parameterize the sample more consistently with the other samples
- Add a README file explaining the configuration
1 parent ce3afb0 commit 4143a3b

9 files changed, +369 −0 lines changed

docs/solutions/glusterfs/README.md

#   ___  ____      _     ____ _     _____
#  / _ \|  _ \    / \   / ___| |   | ____|
# | | | | |_) |  / _ \ | |   | |   |  _|
# | |_| |  _ <  / ___ \| |___| |___| |___
#  \___/|_| \_\/_/   \_\____|_____|_____|
***
## GlusterFS Infrastructure
This configuration creates a GlusterFS volume that is replicated across 3 GlusterFS servers.

It creates a VCN with a route table, internet gateway, and security list. The VCN spans 3 ADs, with each AD containing a subnet and 2 instances: a GlusterFS server and a GlusterFS client.
### Using this example
* Update env-vars with the required information. Most examples use the same set of environment variables, so you only need to do this once.
* Source env-vars
  * `$ . env-vars`
* Update `variables.tf` with your instance options.
* Update the bootstrap scripts in `./userdata/` by replacing instances of `baremetal.oraclevcn.com` with `[Your VCN's DnsLabel].oraclevcn.com`, then run Terraform as shown below.
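A typical run then looks like this (Terraform and the OCI provider plugin must already be installed; on newer Terraform versions run `terraform init` first):

```
$ . env-vars
$ terraform plan
$ terraform apply
```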
### Files in the configuration

#### `env-vars`
Used to export the environment variables used in the configuration. These are usually authentication related; be sure to exclude this file from your version control system. It's typical to keep this file outside of the configuration.

Before you plan, apply, or destroy the configuration, source the file:
`$ . env-vars`

#### `compute.tf`
Defines the compute resources.

#### `networking.tf`
Defines the virtual cloud network resources used in the configuration.

#### `variables.tf`
Defines the variables used in the configuration.

#### `datasources.tf`
Defines the datasources used in the configuration.

#### `provider.tf`
Specifies and passes authentication details to the OCI Terraform provider.

#### `./userdata/bootstrap-server.sh` and `./userdata/bootstrap-client.sh`
These scripts are injected into the instances on launch. The server script configures the GlusterFS volume on each server; the client script sets up the GlusterFS clients.
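### Checking the result

Once `terraform apply` finishes and the bootstrap scripts complete (they deliberately sleep for several minutes), a quick sanity check might look like this; the hostnames and mount point come from `compute.tf` and the bootstrap scripts:

```
# On any of the GlusterFS servers:
sudo gluster peer status
sudo gluster volume info glustervol

# On any of the GlusterFS clients:
df -h /mnt/glustervol
```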
docs/solutions/glusterfs/compute.tf
// One GlusterFS server per AD. The bootstrap scripts key off these hostname
// labels, so rename them consistently with ./userdata/bootstrap-server.sh.
resource "oci_core_instance" "GlusterServerInstance" {
  availability_domain = "${oci_core_subnet.SubnetAD1.availability_domain}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-server1"
  hostname_label      = "glusterfs-server1"
  image               = "${lookup(data.oci_core_images.ServerImageList.images[0], "id")}"
  shape               = "${var.ServerInstanceShape}"
  subnet_id           = "${oci_core_subnet.SubnetAD1.id}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ServerBootStrapFile))}"
  }
}

resource "oci_core_instance" "GlusterServerInstance2" {
  availability_domain = "${oci_core_subnet.SubnetAD2.availability_domain}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-server2"
  hostname_label      = "glusterfs-server2"
  image               = "${lookup(data.oci_core_images.ServerImageList.images[0], "id")}"
  shape               = "${var.ServerInstanceShape}"
  subnet_id           = "${oci_core_subnet.SubnetAD2.id}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ServerBootStrapFile))}"
  }
}

resource "oci_core_instance" "GlusterServerInstance3" {
  availability_domain = "${oci_core_subnet.SubnetAD3.availability_domain}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-server3"
  hostname_label      = "glusterfs-server3"
  image               = "${lookup(data.oci_core_images.ServerImageList.images[0], "id")}"
  shape               = "${var.ServerInstanceShape}"
  subnet_id           = "${oci_core_subnet.SubnetAD3.id}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ServerBootStrapFile))}"
  }
}

// One GlusterFS client per AD; each mounts the volume from the server in its
// AD (see ./userdata/bootstrap-client.sh).
resource "oci_core_instance" "GlusterClientInstance" {
  availability_domain = "${oci_core_subnet.SubnetAD1.availability_domain}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-client1"
  hostname_label      = "glusterfs-client1"
  image               = "${lookup(data.oci_core_images.ClientImageList.images[0], "id")}"
  shape               = "${var.ClientInstanceShape}"
  subnet_id           = "${oci_core_subnet.SubnetAD1.id}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ClientBootStrapFile))}"
  }
}

resource "oci_core_instance" "GlusterClientInstance2" {
  availability_domain = "${oci_core_subnet.SubnetAD2.availability_domain}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-client2"
  hostname_label      = "glusterfs-client2"
  image               = "${lookup(data.oci_core_images.ClientImageList.images[0], "id")}"
  shape               = "${var.ClientInstanceShape}"
  subnet_id           = "${oci_core_subnet.SubnetAD2.id}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ClientBootStrapFile))}"
  }
}

resource "oci_core_instance" "GlusterClientInstance3" {
  availability_domain = "${oci_core_subnet.SubnetAD3.availability_domain}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-client3"
  hostname_label      = "glusterfs-client3"
  image               = "${lookup(data.oci_core_images.ClientImageList.images[0], "id")}"
  shape               = "${var.ClientInstanceShape}"
  subnet_id           = "${oci_core_subnet.SubnetAD3.id}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ClientBootStrapFile))}"
  }
}
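The three server blocks (and likewise the three client blocks) differ only in AD, subnet, and name suffix. With the pre-0.12 Terraform syntax this sample uses, they could be collapsed with `count`, along these lines (a sketch, not part of the commit; resource addresses would change, so it isn't a drop-in rename):

```
resource "oci_core_instance" "GlusterServer" {
  count               = 3
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[count.index], "name")}"
  compartment_id      = "${var.compartment_ocid}"
  display_name        = "glusterfs-server${count.index + 1}"
  hostname_label      = "glusterfs-server${count.index + 1}"
  image               = "${lookup(data.oci_core_images.ServerImageList.images[0], "id")}"
  shape               = "${var.ServerInstanceShape}"
  subnet_id           = "${element(list(oci_core_subnet.SubnetAD1.id, oci_core_subnet.SubnetAD2.id, oci_core_subnet.SubnetAD3.id), count.index)}"
  metadata {
    ssh_authorized_keys = "${var.ssh_public_key}"
    user_data           = "${base64encode(file(var.ServerBootStrapFile))}"
  }
}
```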
docs/solutions/glusterfs/datasources.tf
data "oci_identity_availability_domains" "ADs" {
  compartment_id = "${var.compartment_ocid}"
}

# Gets the OCID of the image. This technique is for example purposes only. The results of oci_core_images may
# change over time for Oracle-provided images, so the only sure way to get the correct OCID is to supply it directly.
data "oci_core_images" "ServerImageList" {
  compartment_id = "${var.compartment_ocid}"
  display_name   = "${var.ServerInstanceImage}"
}

data "oci_core_images" "ClientImageList" {
  compartment_id = "${var.compartment_ocid}"
  display_name   = "${var.ClientInstanceImage}"
}
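As the comment above warns, `oci_core_images` results can drift over time for Oracle-provided images. A more deterministic variant (a sketch; the variable name and OCID are placeholders to fill in) pins the image OCID directly:

```
variable "ServerImageOCID" {
  // placeholder: the OCID of CentOS-7-2017.07.17-0 in your region
  default = "ocid1.image.oc1..<unique_image_id>"
}

// then in compute.tf:
//   image = "${var.ServerImageOCID}"
```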

docs/solutions/glusterfs/env-vars

### Authentication details
export TF_VAR_tenancy_ocid="<tenancy OCID>"
export TF_VAR_user_ocid="<user OCID>"
export TF_VAR_fingerprint="<PEM key fingerprint>"
export TF_VAR_private_key_path="<path to the private key that matches the fingerprint above>"
export TF_VAR_private_key_password="<password for your private key>"
# region has no default in variables.tf, so export it here as well
export TF_VAR_region="<region>"

### Compartment
export TF_VAR_compartment_ocid="<compartment OCID>"

### Public/private keys used on the instance
export TF_VAR_ssh_public_key=$(cat <path to public key>)
docs/solutions/glusterfs/networking.tf
resource "oci_core_virtual_network" "CompleteVCN" {
  cidr_block     = "10.0.0.0/16"
  compartment_id = "${var.compartment_ocid}"
  dns_label      = "${var.DnsLabel}"
  display_name   = "CompleteVCN"
}

resource "oci_core_internet_gateway" "CompleteIG" {
  compartment_id = "${var.compartment_ocid}"
  display_name   = "CompleteIG"
  vcn_id         = "${oci_core_virtual_network.CompleteVCN.id}"
}

resource "oci_core_route_table" "RouteForComplete" {
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_virtual_network.CompleteVCN.id}"
  display_name   = "RouteTableForComplete"
  route_rules {
    cidr_block        = "0.0.0.0/0"
    network_entity_id = "${oci_core_internet_gateway.CompleteIG.id}"
  }
}

resource "oci_core_security_list" "Subnet" {
  compartment_id = "${var.compartment_ocid}"
  display_name   = "Subnet"
  vcn_id         = "${oci_core_virtual_network.CompleteVCN.id}"
  egress_security_rules = [{
    protocol    = "all"
    destination = "0.0.0.0/0"
  }]
  ingress_security_rules = [{
    protocol = "all"
    source   = "0.0.0.0/0"
  }]
}

resource "oci_core_subnet" "SubnetAD1" {
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[0], "name")}"
  cidr_block          = "10.0.4.0/24"
  display_name        = "SubnetAD1"
  compartment_id      = "${var.compartment_ocid}"
  vcn_id              = "${oci_core_virtual_network.CompleteVCN.id}"
  route_table_id      = "${oci_core_route_table.RouteForComplete.id}"
  security_list_ids   = ["${oci_core_security_list.Subnet.id}"]
  dhcp_options_id     = "${oci_core_virtual_network.CompleteVCN.default_dhcp_options_id}"
  dns_label           = "publicsubnetad1"
}

resource "oci_core_subnet" "SubnetAD2" {
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[1], "name")}"
  cidr_block          = "10.0.5.0/24"
  display_name        = "SubnetAD2"
  compartment_id      = "${var.compartment_ocid}"
  vcn_id              = "${oci_core_virtual_network.CompleteVCN.id}"
  route_table_id      = "${oci_core_route_table.RouteForComplete.id}"
  security_list_ids   = ["${oci_core_security_list.Subnet.id}"]
  dhcp_options_id     = "${oci_core_virtual_network.CompleteVCN.default_dhcp_options_id}"
  dns_label           = "publicsubnetad2"
}

resource "oci_core_subnet" "SubnetAD3" {
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[2], "name")}"
  cidr_block          = "10.0.6.0/24"
  display_name        = "SubnetAD3"
  compartment_id      = "${var.compartment_ocid}"
  vcn_id              = "${oci_core_virtual_network.CompleteVCN.id}"
  route_table_id      = "${oci_core_route_table.RouteForComplete.id}"
  security_list_ids   = ["${oci_core_security_list.Subnet.id}"]
  dhcp_options_id     = "${oci_core_virtual_network.CompleteVCN.default_dhcp_options_id}"
  dns_label           = "publicsubnetad3"
}
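The `Subnet` security list above is wide open (all protocols, ingress and egress, 0.0.0.0/0), which keeps the sample simple but shouldn't survive into a real deployment. A tighter ingress set could allow SSH plus the Gluster ports referenced by the commented-out `firewall-cmd` rules in the bootstrap scripts; a sketch in the same legacy provider syntax (rule shape follows this file's style and may need adjusting):

```
ingress_security_rules = [{
  // SSH from anywhere
  tcp_options {
    "min" = 22
    "max" = 22
  }
  protocol = "6"
  source   = "0.0.0.0/0"
}, {
  // GlusterFS daemon and brick ports, VCN-internal only
  tcp_options {
    "min" = 24007
    "max" = 24020
  }
  protocol = "6"
  source   = "10.0.0.0/16"
}]
```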
docs/solutions/glusterfs/provider.tf
provider "oci" {
  tenancy_ocid         = "${var.tenancy_ocid}"
  user_ocid            = "${var.user_ocid}"
  fingerprint          = "${var.fingerprint}"
  private_key_path     = "${var.private_key_path}"
  region               = "${var.region}"
  private_key_password = "${var.private_key_password}"
}
docs/solutions/glusterfs/userdata/bootstrap-client.sh
#!/bin/bash
#yum update -y
#######################################################################################################################################################
### This bootstrap script runs on the GlusterFS clients and does the following:
### 1- installs the GlusterFS client packages on all nodes
### 2- fixes the resolv.conf file so the GlusterFS hostnames resolve (update the domains below to match your environment)
### 3- disables the local firewall. Feel free to update this script to open only the required ports.
### 4- creates a local mount point on each GlusterFS client that maps to a GlusterFS server
### 5- adds an entry to the client's fstab with the '_netdev' option. IMPORTANT to avoid boot issues.
###
######################################################################################################################################################
exec 2>/dev/null

sed -i '/search/d' /etc/resolv.conf
echo "search baremetal.oraclevcn.com publicsubnetad2.baremetal.oraclevcn.com publicsubnetad1.baremetal.oraclevcn.com publicsubnetad3.baremetal.oraclevcn.com localdomain" >> /etc/resolv.conf
chattr -R +i /etc/resolv.conf
#firewall-cmd --zone=public --add-port=111/tcp --add-port=139/tcp --add-port=445/tcp --add-port=965/tcp --add-port=2049/tcp \
#--add-port=38465-38469/tcp --add-port=631/tcp --add-port=111/udp --add-port=963/udp --add-port=49152-49251/tcp --permanent
#firewall-cmd --reload
systemctl disable firewalld
systemctl stop firewalld
yum install glusterfs glusterfs-fuse attr -y
mkdir /mnt/glustervol
# Give the servers time to finish creating the volume before mounting.
sleep 4m

glusterfshost=$(hostname -s)
case $glusterfshost in
    "glusterfs-client1")
        echo "glusterfs-server1:/glustervol /mnt/glustervol glusterfs defaults,_netdev 0 0" >> /etc/fstab
        mount -a
        ;;
    "glusterfs-client2")
        echo "glusterfs-server2:/glustervol /mnt/glustervol glusterfs defaults,_netdev 0 0" >> /etc/fstab
        mount -a
        ;;
    "glusterfs-client3")
        echo "glusterfs-server3:/glustervol /mnt/glustervol glusterfs defaults,_netdev 0 0" >> /etc/fstab
        mount -a
        ;;
    *)
        echo "$glusterfshost" >> /tmp/notfound.txt
        ;;
esac
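Note that each client mounts the volume only from the server in its own AD, so the mount fails if that one server is down at boot. The GlusterFS FUSE client accepts fallback volfile servers; a more resilient fstab entry might look like this (a sketch using the `backup-volfile-servers` mount option supported by GlusterFS 3.x):

```
glusterfs-server1:/glustervol /mnt/glustervol glusterfs defaults,_netdev,backup-volfile-servers=glusterfs-server2:glusterfs-server3 0 0
```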
docs/solutions/glusterfs/userdata/bootstrap-server.sh
#!/bin/bash
#yum update -y
#######################################################################################################################################################
### This bootstrap script runs on the GlusterFS servers and configures the following:
### 1- installs the GlusterFS packages
### 2- formats the NVMe disks (HighIO1.36 shape) and creates an XFS LVM LV called "mybrick"
### 3- fixes the resolv.conf file. GlusterFS needs DNS to work properly, so make sure you update the domains below to match your environment.
### 4- disables the local firewall. Feel free to update this script to open only the required ports.
### 5- installs and configures a gluster volume called glustervol using the server1, server2, and server3 bricks (3 replicas)
###
######################################################################################################################################################
exec 2>/dev/null

yum install -y centos-release-gluster310.noarch
yum install -y glusterfs-server samba
pvcreate /dev/nvme0n1
pvcreate /dev/nvme1n1
pvcreate /dev/nvme2n1
pvcreate /dev/nvme3n1
vgcreate vg_gluster /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1
lvcreate -L 11.6T -n mybrick vg_gluster
mkfs.xfs /dev/vg_gluster/mybrick
mkdir -p /bricks/mybrick
mount /dev/vg_gluster/mybrick /bricks/mybrick
echo "/dev/vg_gluster/mybrick /bricks/mybrick xfs defaults,_netdev 0 0" >> /etc/fstab
sed -i '/search/d' /etc/resolv.conf
echo "search baremetal.oraclevcn.com publicsubnetad2.baremetal.oraclevcn.com publicsubnetad1.baremetal.oraclevcn.com publicsubnetad3.baremetal.oraclevcn.com localdomain" >> /etc/resolv.conf
chattr -R +i /etc/resolv.conf
#firewall-cmd --zone=public --add-port=24007-24020/tcp --permanent
#firewall-cmd --reload
systemctl disable firewalld
systemctl stop firewalld
systemctl enable glusterd.service
systemctl start glusterd.service
mkdir /bricks/mybrick/brick

# Only server3 drives the cluster setup: it probes the other two peers and creates the replicated volume.
if [ "$(hostname -s)" == "glusterfs-server3" ]; then
    sleep 20
    export host3=`hostname`
    export server1=`host glusterfs-server1 | cut -c1-17`
    export server2=`host glusterfs-server2 | cut -c1-17`
    gluster peer probe ${server1}
    gluster peer probe ${server2}
    sleep 20
    gluster volume create glustervol replica 3 transport tcp ${host3}:/bricks/mybrick/brick ${server2}:/bricks/mybrick/brick ${server1}:/bricks/mybrick/brick force
    sleep 10
    gluster volume start glustervol
fi
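The `pvcreate`/`lvcreate` lines assume the four NVMe devices of the HighIO1.36 shape named in the header comment, while `variables.tf` defaults `ServerInstanceShape` to `BM.DenseIO1.36`; the device list and the hard-coded `-L 11.6T` may need adjusting for other shapes. A shape-independent sketch allocates all free extents instead:

```
lvcreate -l 100%FREE -n mybrick vg_gluster
```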
docs/solutions/glusterfs/variables.tf
// These settings can be populated here or read from your env-vars settings

// Settings for authentication
variable "tenancy_ocid" {}
variable "user_ocid" {}
variable "fingerprint" {}
variable "region" {}
variable "private_key_path" {}
variable "private_key_password" {}

variable "compartment_ocid" {}

// The SSH public key for connecting to the compute instances
variable "ssh_public_key" {}

// The DNS label to use for the VCN
variable "DnsLabel" {}

variable "ServerInstanceShape" {
  default = "BM.DenseIO1.36"
}

variable "ClientInstanceShape" {
  default = "VM.Standard1.2"
}

variable "ServerInstanceImage" {
  default = "CentOS-7-2017.07.17-0"
}

variable "ClientInstanceImage" {
  default = "Oracle-Linux-7.4-2017.10.25-0"
}

variable "ServerBootStrapFile" {
  default = "./userdata/bootstrap-server.sh"
}

variable "ClientBootStrapFile" {
  default = "./userdata/bootstrap-client.sh"
}
