Skip to content

Commit b421ba4

Browse files
authored
Merge pull request #159 from nebius/feature/simple-solutions
Feature/simple solutions part 1
2 parents b0c6de0 + c47fe9f commit b421ba4

File tree

15 files changed

+1013
-0
lines changed

15 files changed

+1013
-0
lines changed
Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
#cloud-config
# Cloud-init template rendered by Terraform templatefile() (see
# modules/instance/main.tf). %{ ... } directives and ${ ... } interpolations
# are Terraform template syntax, not cloud-init syntax.
# Creates the requested users, optionally prepares an extra disk, mounts a
# shared virtiofs filesystem, installs S3 credentials and mounts an S3 bucket.
users:
%{ for user in users}
  - name: ${user.user_name}
    groups: sudo
    shell: /bin/bash
    sudo: 'ALL=(ALL) NOPASSWD:ALL'
    # "ssh-authorized-keys" is deprecated in cloud-init; the underscore form
    # is the supported spelling.
    ssh_authorized_keys:
      - ${user.ssh_public_key}
%{ endfor}

runcmd:
  - apt-get update
  # -y is required: without it apt-get aborts the upgrade at the
  # confirmation prompt when run non-interactively by cloud-init.
  - apt-get upgrade -y

# Partition, format and persistently mount the extra disk, if provided.
%{ if extra_disk_id != "" }
  - mkdir -p ${extra_path}
  # Log disk preparation to /var/log — the previous /home/tux/log.txt target
  # assumed a "tux" user that this template does not create.
  - parted -s /dev/disk/by-id/virtio-${extra_disk_id} mklabel gpt >> /var/log/cloud-init-disk-setup.log
  - parted -s /dev/disk/by-id/virtio-${extra_disk_id} mkpart primary ext4 0% 100% >> /var/log/cloud-init-disk-setup.log
  - sync
  - mkfs.ext4 /dev/disk/by-id/virtio-${extra_disk_id}-part1 >> /var/log/cloud-init-disk-setup.log
  - sync
  - mount -o rw /dev/disk/by-id/virtio-${extra_disk_id}-part1 ${extra_path} >> /var/log/cloud-init-disk-setup.log
  - echo "/dev/disk/by-id/virtio-${extra_disk_id}-part1 ${extra_path} ext4 defaults 0 2" >> /etc/fstab
  - chown nobody:nogroup ${extra_path}
  - chmod 777 ${extra_path}
%{ endif }

# Mount the shared filesystem, if provided. "filesystem-0" must match the
# mount_tag set on the instance's filesystems attachment.
%{if shared_filesystem_id != "" }
  - mkdir -p ${shared_filesystem_mount}
  - mount -t virtiofs filesystem-0 ${shared_filesystem_mount}
  - chmod a+w ${shared_filesystem_mount}
  - echo "filesystem-0 ${shared_filesystem_mount} virtiofs rw 0 0" >> /etc/fstab
%{endif}

# Install the AWS CLI and write S3 credentials for every user and for root.
%{if aws_access_key_id != ""}
  - snap install aws-cli --classic
%{ for user in users}
  - mkdir -p /home/${user.user_name}/.aws
  - echo "[default]" > /home/${user.user_name}/.aws/credentials
  - echo "aws_access_key_id=${aws_access_key_id}" >> /home/${user.user_name}/.aws/credentials
  - echo "aws_secret_access_key=${aws_secret_access_key}" >> /home/${user.user_name}/.aws/credentials
  - echo "[default]" > /home/${user.user_name}/.aws/config
  - echo "endpoint_url = https://storage.eu-north1.nebius.cloud:443" >> /home/${user.user_name}/.aws/config
  - echo "region = eu-north1" >> /home/${user.user_name}/.aws/config
  - chown -R ${user.user_name}:${user.user_name} /home/${user.user_name}/.aws
  - chmod 600 /home/${user.user_name}/.aws/credentials
%{ endfor }

  - mkdir -p /root/.aws
  - echo "[default]" > /root/.aws/credentials
  - echo "aws_access_key_id=${aws_access_key_id}" >> /root/.aws/credentials
  - echo "aws_secret_access_key=${aws_secret_access_key}" >> /root/.aws/credentials
  - echo "[default]" > /root/.aws/config
  - echo "endpoint_url = https://storage.eu-north1.nebius.cloud:443" >> /root/.aws/config
  - echo "region = eu-north1" >> /root/.aws/config

# Install mountpoint-s3 and mount the bucket.
# NOTE(review): this mount is not added to fstab, so it does not survive a
# reboot — confirm that is intended.
%{if mount_bucket != "" }
  - wget https://s3.amazonaws.com/mountpoint-s3-release/1.14.0/x86_64/mount-s3-1.14.0-x86_64.deb
  - dpkg -i mount-s3-1.14.0-x86_64.deb
  - mkdir -p ${s3_mount_path}
  - mount-s3 --upload-checksums=off --maximum-throughput-gbps=200 --allow-delete --allow-overwrite --allow-other --endpoint-url=https://storage.eu-north1.nebius.cloud:443 ${mount_bucket} ${s3_mount_path}
%{endif}
%{endif}

modules/instance/locals.tf

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
locals {
  # Resolve each user's SSH key: prefer the inline key, otherwise read it
  # from ssh_key_path. null only when the file is missing, which the
  # variables.tf validation should already have rejected.
  users = [
    for user in var.users : {
      user_name = user.user_name
      ssh_public_key = user.ssh_public_key != null ? user.ssh_public_key : (
        fileexists(user.ssh_key_path) ? file(user.ssh_key_path) : null
      )
    }
  ]

  # Per-region hardware defaults. Currently informational only — the
  # coalesce() fallbacks below are commented out.
  regions_default = {
    eu-west1 = {
      cpu_nodes_platform = "cpu-d3"
      cpu_nodes_preset   = "16vcpu-64gb"
      gpu_nodes_platform = "gpu-h200-sxm"
      gpu_nodes_preset   = "1gpu-16vcpu-200gb"
    }
    eu-north1 = {
      cpu_nodes_platform = "cpu-e2"
      cpu_nodes_preset   = "16vcpu-64gb"
      gpu_nodes_platform = "gpu-h100-sxm"
      gpu_nodes_preset   = "1gpu-16vcpu-200gb"
    }
  }

  current_region_defaults = local.regions_default[var.region]

  # cpu_nodes_preset   = coalesce(var.cpu_nodes_preset, local.current_region_defaults.cpu_nodes_preset)
  # cpu_nodes_platform = coalesce(var.cpu_nodes_platform, local.current_region_defaults.cpu_nodes_platform)
  # gpu_nodes_platform = coalesce(var.gpu_nodes_platform, local.current_region_defaults.gpu_nodes_platform)
  # gpu_nodes_preset   = coalesce(var.gpu_nodes_preset, local.current_region_defaults.gpu_nodes_preset)

  extra_path = var.extra_path

  # First 20 chars of the disk id, matched against /dev/disk/by-id/virtio-<id>
  # in the cloud-init template. NOTE(review): presumably truncated because the
  # guest exposes only a 20-character virtio serial — confirm.
  extra_disk_id = var.add_extra_storage ? substr(nebius_compute_v1_disk.extra-storage-disk[0].id, 0, 20) : ""

  # Snapshot of the values handed to cloud-init, written out for debugging
  # by local_file.cloud_init_variables_log in main.tf.
  cloud_init_log = jsonencode({
    extra_path    = local.extra_path
    extra_disk_id = local.extra_disk_id
    state         = terraform.workspace
    users         = local.users
  })
}

modules/instance/main.tf

Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
# Boot disk for the instance, built from the Ubuntu 22.04 + CUDA 12 image family.
resource "nebius_compute_v1_disk" "boot-disk" {
  parent_id        = var.parent_id
  name             = "instance-boot-disk-${var.instance_name}"
  block_size_bytes = 4096
  # GiB -> bytes
  size_bytes          = var.boot_disk_size_gb * 1024 * 1024 * 1024
  type                = "NETWORK_SSD"
  source_image_family = { image_family = "ubuntu22.04-cuda12" }
}
# Optional secondary data disk; only created when add_extra_storage is set.
resource "nebius_compute_v1_disk" "extra-storage-disk" {
  count = var.add_extra_storage ? 1 : 0

  parent_id        = var.parent_id
  name             = "extra-storage-disk-${var.instance_name}"
  block_size_bytes = 4096
  # GiB -> bytes
  size_bytes = var.extra_storage_size_gb * 1024 * 1024 * 1024
  type       = var.extra_storage_class
}
# The VM itself: network interface, hardware preset, disks, shared
# filesystem and the rendered cloud-init payload.
resource "nebius_compute_v1_instance" "instance" {
  parent_id = var.parent_id
  name      = var.instance_name

  network_interfaces = [
    {
      name       = "eth0"
      subnet_id  = var.subnet_id
      ip_address = {}
      # An empty object requests a public IP; null skips the allocation.
      public_ip_address = var.public_ip ? {} : null
    }
  ]

  resources = {
    platform = var.platform
    preset   = var.preset
  }

  boot_disk = {
    attach_mode = "READ_WRITE"
    # Reference by id, consistent with secondary_disks below.
    existing_disk = {
      id = nebius_compute_v1_disk.boot-disk.id
    }
  }

  secondary_disks = var.add_extra_storage ? [
    {
      attach_mode = "READ_WRITE"
      existing_disk = {
        id = nebius_compute_v1_disk.extra-storage-disk[0].id
      }
    }
  ] : []

  filesystems = var.shared_filesystem_id != "" ? [
    {
      attach_mode = "READ_WRITE"
      existing_filesystem = {
        id = var.shared_filesystem_id
      }
      # Must match the device tag mounted by the cloud-init template.
      mount_tag = "filesystem-0"
    }
  ] : []

  # Resolve the template relative to this module instead of the current
  # working directory: the previous "../modules/..." path only worked when
  # terraform was invoked from a sibling directory of "modules".
  cloud_init_user_data = templatefile("${path.module}/../cloud-init/simple-setup-init.tftpl", {
    users                   = local.users,
    extra_path              = local.extra_path,
    extra_disk_id           = local.extra_disk_id,
    shared_filesystem_id    = var.shared_filesystem_id,
    shared_filesystem_mount = var.shared_filesystem_mount,
    aws_access_key_id       = var.aws_access_key_id,
    aws_secret_access_key   = var.aws_secret_access_key,
    mount_bucket            = var.mount_bucket,
    s3_mount_path           = var.s3_mount_path
  })
}
# Debug artifact: dumps the values that were fed into the cloud-init
# template (see local.cloud_init_log) next to the module sources.
resource "local_file" "cloud_init_variables_log" {
  content  = local.cloud_init_log
  filename = "${path.module}/cloud-init-variables.log"
}

modules/instance/outputs.tf

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
output "internal_ip" {
  description = "The internal IP address (with the /32 suffix stripped)."
  value       = trimsuffix(nebius_compute_v1_instance.instance.status.network_interfaces[0].ip_address.address, "/32")
}

output "public_ip" {
  description = "The public IP address, or null when public_ip = false."
  # try(): when the instance is created without a public IP,
  # public_ip_address is null and dereferencing .address would error.
  value = try(trimsuffix(nebius_compute_v1_instance.instance.status.network_interfaces[0].public_ip_address.address, "/32"), null)
}

modules/instance/provider.tf

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
# Provider requirements: the Nebius provider is served from the
# vendor-hosted registry rather than registry.terraform.io.
terraform {
  required_providers {
    nebius = {
      source  = "terraform-provider.storage.eu-north1.nebius.cloud/nebius/nebius"
      version = ">= 0.4.24"
    }
  }
}

modules/instance/variables.tf

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
variable "parent_id" {
  type        = string
  description = "Id of the folder where the resources are going to be created."
  default     = null
}

variable "subnet_id" {
  type        = string
  description = "ID of the subnet."
  default     = null
}

variable "region" {
  type        = string
  description = "Region; selects the per-region defaults in locals.tf."
  default     = "eu-north1"
}

variable "instance_name" {
  type        = string
  description = "Name of the instance."
  default     = "instance"
}

variable "platform" {
  description = "VM platform."
  type        = string
  default     = "cpu-e2"
}

variable "preset" {
  description = "VM resources preset."
  type        = string
  default     = "16vcpu-64gb"
}

# NOTE(review): currently unused — the coalesce() with the region defaults
# is commented out in locals.tf. Kept for interface compatibility.
variable "cpu_nodes_preset" {
  description = "CPU and RAM configuration for instances."
  type        = string
  default     = null
}

variable "shared_filesystem_id" {
  description = "Id of an existing shared file system; empty disables the mount."
  type        = string
  default     = ""
}

variable "shared_filesystem_mount" {
  description = "Mounting point of the shared file system."
  type        = string
  default     = "/mnt/share"
}

variable "users" {
  description = "List of users with their SSH keys."
  type = list(object({
    user_name      = string
    ssh_public_key = optional(string)                      # Inline SSH key
    ssh_key_path   = optional(string, "~/.ssh/id_rsa.pub") # Path to SSH key file
  }))
  default = []

  validation {
    condition = alltrue([
      for user in var.users : user.ssh_public_key != null || fileexists(user.ssh_key_path)
    ])
    error_message = "Each user must have at least one SSH key defined as 'ssh_public_key' or 'ssh_key_path'."
  }
}

variable "add_extra_storage" {
  type        = bool
  default     = false
  description = "If true, a new disk will be created and mounted at <extra_path>."
}

variable "extra_path" {
  type        = string
  default     = "/mnt/storage"
  description = "Folder where the extra disk will be mounted."
}

variable "boot_disk_size_gb" {
  type        = number
  default     = 50
  description = "Size of the boot disk, GB."
}

variable "extra_storage_size_gb" {
  type        = number
  default     = 50
  # Previous description said "nfs storage"; this is a block disk.
  description = "Size of the newly created extra disk, GB."
}

variable "extra_storage_class" {
  type        = string
  default     = "NETWORK_SSD"
  description = "Disk type of the additional disk being added."
}

variable "public_ip" {
  type        = bool
  default     = true
  description = "Attach a public IP to the VM if true."
}

variable "mount_bucket" {
  type        = string
  description = "Name of a bucket that should be mounted into the filesystem; empty disables the mount."
  default     = ""
}

variable "s3_mount_path" {
  type        = string
  description = "Mount point for the S3 bucket mount."
  default     = "/mnt/s3"
}

variable "aws_access_key_id" {
  type        = string
  description = "S3 access key id; empty skips the AWS CLI/credentials setup."
  default     = ""
  # Keep the credential out of plan/apply output.
  sensitive   = true
}

variable "aws_secret_access_key" {
  type        = string
  # Description fixed: this is the secret key, not the access key.
  description = "S3 secret access key."
  default     = ""
  sensitive   = true
}

0 commit comments

Comments
 (0)