
Commit 4b3d74d

sd109 authored and sjpb committed
Snapshot of current Waldur Slurm PoC config
1 parent b0d5953 commit 4b3d74d

File tree: 10 files changed (+170 −11 lines)


ansible/roles/openondemand/defaults/main.yml

Lines changed: 2 additions & 2 deletions
@@ -56,7 +56,7 @@ openondemand_auth_defaults:
       - 'ProxyPreserveHost On' # see under https://grafana.com/blog/2022/02/08/grafana-7.5.15-and-8.3.5-released-with-moderate-severity-security-fixes/
     user_map_cmd: /opt/ood/ood_auth_map/bin/ood_auth_map.mapfile
     user_map_match: none
-
+
   # Defaults for basic/PAM auth - see https://osc.github.io/ood-documentation/latest/authentication/pam.html
   basic_pam:
     httpd_auth: # ood_portal.yml.j2
@@ -91,7 +91,7 @@ openondemand_osc_ood_defaults:
   - SSLHonorCipherOrder On
   - SSLCompression off
   - SSLSessionTickets Off
-
+
 # User mapping:
 user_map_cmd: "{{ openondemand_auth_defaults[openondemand_auth | lower].user_map_cmd }}"
 user_map_match: "{{ openondemand_auth_defaults[openondemand_auth | lower].user_map_match }}"
File renamed without changes.

environments/.stackhpc/inventory/extra_groups

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
-[basic_users:children]
-cluster
+# [basic_users:children]
+# cluster

 [rebuild:children]
 control

environments/.stackhpc/inventory/group_vars/all/freeipa.yml

Lines changed: 2 additions & 0 deletions
@@ -10,3 +10,5 @@ freeipa_users:
 # freeipa_client hosts must use a FreeIPA server for name resolution - requires hosts to be in group `resolv_conf`.
 resolv_conf_nameservers:
   - "{{ hostvars[groups['freeipa_server'].0].ansible_host }}"
+
+node_fqdn: "{{ [inventory_hostname, openhpc_cluster_name, cluster_domain_suffix ] | join('.') }}"
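
The added `node_fqdn` is just the three inventory facts joined with dots. A quick way to see what it renders to is an ad-hoc call with Ansible's debug module — a hypothetical invocation, assuming this environment's inventory path; the variable values come from the diffs in this commit:

```bash
# Illustrative sanity check: render node_fqdn for one host.
ansible scott-slurm-dev-login-0 -i environments/.stackhpc/inventory \
  -m ansible.builtin.debug -a "msg={{ node_fqdn }}"
# expected: "scott-slurm-dev-login-0.scott-slurm-dev.invalid"
```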
Lines changed: 62 additions & 2 deletions
@@ -1,6 +1,66 @@
-openondemand_auth: basic_pam
+openondemand_auth: oidc # or basic_pam
 openondemand_jupyter_partition: standard
 openondemand_desktop_partition: standard
-#openondemand_dashboard_support_url:
+#openondemand_dashboard_support_url:
 #openondemand_dashboard_docs_url:
 #openondemand_filesapp_paths:
+
+openondemand_servername: 128.232.226.209
+openondemand_oidc_provider_url: https://identity.apps.hpc.cam.ac.uk/realms/az-rcp-cloud-portal-demo
+openondemand_oidc_crypto_passphrase: <redacted>
+openondemand_oidc_client_id: ondemand
+openondemand_oidc_client_secret: <redacted>
+openondemand_oidc_scope: "openid profile email"
+openondemand_oidc_remote_user_claim: preferred_username
+# openondemand_oidc_remote_user_claim: email
+
+# add openondemand_apps.shell.env.ood_ssh_wrapper:
+openondemand_apps:
+  files:
+    env:
+      ood_shell: ""
+  shell:
+    env:
+      ood_shell_origin_check: "https://{{ openondemand_servername }}"
+      ood_ssh_wrapper: /usr/bin/ood_shell_wrapper # TODO: changeme?
+      # this is bash --login -c "cd && exec bash"
+      # #!/usr/bin/bash
+  dashboard:
+    env:
+      motd_path: /etc/motd
+      motd_format: markdown
+      ood_dashboard_support_url: "{{ openondemand_dashboard_support_url }}"
+      ood_dashboard_docs_url: "{{ openondemand_dashboard_docs_url }}"
+      ood_brand_bg_color: "#0e6ec8"
+      ood_dashboard_title: "{{ openhpc_cluster_name }}"
+
+user_map_match: '.*' # map remote user to local user, as-is
+user_map_cmd: null # need to set this to override the default "openondemand" role behaviour of using file-based mapping
+
+# user_map_match: 'waldur_user_.*'
+
+# user_map_cmd:
+# user_map_cmd: "echo waldur_user_$1"
+# user_map_cmd: "echo -e waldur_user_$1 | tr -d '[:space:]'"
+
+# Script manually created on login node with contents:
+# ```
+# #!/bin/bash
+# echo waldur_user_$1
+# ```
+# user_map_cmd: /opt/user-mapper-test.sh
+
+# openondemand_mapping_users:
+#   - name: waldur_user_scott-test-user
+#     openondemand_username: scott-test-user
+
+# lua_log_level: debug
+
+# user_map_match: 'waldur_user_.*'
+# openondemand_oidc_remote_user_claim: email
+
+
+# oidc_session_inactivity_timeout: 28800
+# oidc_session_max_duration: 28800
+# oidc_state_max_number_of_cookies: "10 true"
+# oidc_cookie_same_site: On
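
A note on the user-mapping experiments above: Open OnDemand passes the authenticated remote user to `user_map_cmd` as `$1` and takes the command's stdout as the local username, so stray whitespace in the output breaks the mapping — that is what the commented-out `tr -d '[:space:]'` variant is guarding against. A minimal sketch of the manually created `/opt/user-mapper-test.sh` with that guard folded in (illustrative, not the committed script):

```bash
#!/bin/bash
# Sketch of the /opt/user-mapper-test.sh mapping script referenced above.
# OOD invokes: user_map_cmd <remote-user>; stdout becomes the local username.
# Prefix the remote identity to match the provisioned waldur_user_* accounts,
# stripping all whitespace so the result is a single clean token.
echo "waldur_user_$1" | tr -d '[:space:]'
```

Since the provider URL's `/realms/` path suggests a Keycloak realm, the endpoints mod_auth_openidc will use can be sanity-checked with standard OIDC discovery (assuming the realm is reachable from wherever this is run):

```bash
curl -s https://identity.apps.hpc.cam.ac.uk/realms/az-rcp-cloud-portal-demo/.well-known/openid-configuration
```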
Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+all:
+  vars:
+    openhpc_cluster_name: scott-slurm-dev
+    cluster_domain_suffix: invalid
+
+control:
+  hosts:
+    scott-slurm-dev-control:
+      ansible_host: 192.168.3.37
+      instance_id: dfe0f2bc-d874-433c-a91d-0fa73b51d581
+  vars:
+    appliances_state_dir: /var/lib/state # NB needs to be set on group not host otherwise it is ignored in packer build!
+
+login:
+  hosts:
+    scott-slurm-dev-login-0:
+      ansible_host: 192.168.3.4
+      instance_id: fd4e85fd-7fd3-4a4e-ae62-29003b8a1468
+
+scott-slurm-dev_standard:
+  hosts:
+    scott-slurm-dev-compute-0:
+      ansible_host: 192.168.3.89
+      instance_id: ec2683b4-4c9b-41d8-b645-e13d082a9286
+    scott-slurm-dev-compute-1:
+      ansible_host: 192.168.3.144
+      instance_id: 608844bd-ba76-4d99-9b12-925f7e682f8f
+
+compute:
+  children:
+    scott-slurm-dev_standard:
+
+# freeipa_server:
+#   hosts:
+#     scott-slurm-dev-freeipa:
+#       ansible_host: 192.168.3.119
+#       instance_id: d53dc420-b0b5-492c-ad92-de4ca5da55b8
Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+all:
+  vars:
+    openhpc_cluster_name: scott-slurm-dev
+    cluster_domain_suffix: invalid
+
+control:
+  hosts:
+    scott-slurm-dev-control:
+      ansible_host: 192.168.3.37
+      instance_id: dfe0f2bc-d874-433c-a91d-0fa73b51d581
+  vars:
+    appliances_state_dir: /var/lib/state # NB needs to be set on group not host otherwise it is ignored in packer build!
+
+login:
+  hosts:
+    scott-slurm-dev-login-0:
+      ansible_host: 192.168.3.4
+      instance_id: fd4e85fd-7fd3-4a4e-ae62-29003b8a1468
+
+scott-slurm-dev_standard:
+  hosts:
+    scott-slurm-dev-compute-0:
+      ansible_host: 192.168.3.89
+      instance_id: ec2683b4-4c9b-41d8-b645-e13d082a9286
+    scott-slurm-dev-compute-1:
+      ansible_host: 192.168.3.144
+      instance_id: 608844bd-ba76-4d99-9b12-925f7e682f8f
+
+compute:
+  children:
+    scott-slurm-dev_standard:
+
+freeipa_server:
+  hosts:
+    scott-slurm-dev-freeipa:
+      ansible_host: 192.168.3.199
+      instance_id: d53dc420-b0b5-492c-ad92-de4ca5da55b8
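
The two inventory snapshots above differ only in the freeipa_server group (commented out with 192.168.3.119 in the first, active with 192.168.3.199 in the second). Either can be checked for parse errors and group layout with ansible-inventory — the path below is illustrative, since the snapshots' filenames aren't shown in this view:

```bash
# Illustrative: confirm the inventory parses and freeipa_server appears
# under the expected groups (actual file path not shown in this commit view).
ansible-inventory -i environments/.stackhpc/inventory/hosts --graph
```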

environments/.stackhpc/terraform/ARCUS.tfvars

Lines changed: 2 additions & 0 deletions
@@ -2,3 +2,5 @@ cluster_net = "portal-internal"
 cluster_subnet = "portal-internal"
 control_node_flavor = "vm.ska.cpu.general.eighth"
 other_node_flavor = "vm.ska.cpu.general.small"
+
+cluster_name = "scott-slurm-dev"

environments/.stackhpc/terraform/main.tf

Lines changed: 23 additions & 2 deletions
@@ -89,9 +89,9 @@ module "cluster" {
   # flavor: var.other_node_flavor
   # }
   }
-
+
   volume_backed_instances = var.volume_backed_instances
-
+
   environment_root = var.environment_root
   # Can reduce volume size a lot for short-lived CI clusters:
   state_volume_size = 10
@@ -101,3 +101,24 @@
   home_volume_type = var.home_volume_type

 }
+
+resource "openstack_compute_instance_v2" "freeipa" {
+  name            = "${var.cluster_name}-freeipa"
+  image_id        = "3d20681e-38a6-4563-a80e-f1762f6cdce8" # == Rocky-8-GenericCloud-Base-8.9-20231119.0.x86_64.qcow2
+  flavor_id       = "c8b72062-5d52-4590-9d7a-68a670b44442"
+  key_pair        = "slurm-app-ci"
+  security_groups = ["default", "SSH"]
+
+  network {
+    name = var.cluster_net
+  }
+
+  metadata = {
+    environment_root = var.environment_root
+  }
+
+  user_data = <<-EOF
+    #cloud-config
+    fqdn: ${var.cluster_name}-freeipa.${var.cluster_name}.invalid
+  EOF
+}
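
The `user_data` relies on cloud-init's `fqdn` key to set the hostname at first boot. A hedged way to confirm it took effect, assuming the Rocky GenericCloud image's default `rocky` login user and the freeipa address from the inventory snapshot above:

```bash
# Illustrative check that cloud-init applied the fqdn from user_data:
ssh rocky@192.168.3.199 hostname -f
# expected: scott-slurm-dev-freeipa.scott-slurm-dev.invalid
```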

environments/common/inventory/group_vars/all/defaults.yml

Lines changed: 3 additions & 3 deletions
@@ -39,7 +39,7 @@ appliances_local_users_default:
       home: /var/lib/{{ appliances_local_users_ansible_user_name }}
       move_home: true
       local: true
-
+
   - user: "{{ appliances_local_users_podman }}"
     enable: "{{ 'podman' in group_names }}"

@@ -50,7 +50,7 @@
       shell: /sbin/nologin
       uid: 202
       system: true
-
+
   - group:
       name: prometheus
       gid: 976
@@ -61,7 +61,7 @@
       shell: /usr/sbin/nologin
       system: true
     enable: "{{ 'prometheus' in group_names }}"
-
+
   - group:
       name: grafana
       gid: 979
