Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 17 additions & 9 deletions core/api-server/methods/auth.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,25 +115,33 @@ func RedisAuthorization(username string, c *gin.Context) (models.UserAuthorizati
pathScan = "module/" + c.Param("module_id") + "/roles/"
}

// get roles of current user: HGET roles/<username>.<entity> -> <role>
role, errRedisRoleGet := redisConnection.HGet(ctx, "roles/"+username, pathGet).Result()
// get roles of current user: HGET roles/<username> -> <role(s)>
roles, errRedisRoleGet := redisConnection.HGet(ctx, "roles/"+username, pathGet).Result()

// handle redis error
if errRedisRoleGet != nil {
return userAuthorizationsRedis, errRedisRoleGet
}

// get action for current role and entity: SMEMBERS <entity>/<reference>/roles/<role>
actions, errRedisRoleScan := redisConnection.SMembers(ctx, pathScan+role).Result()

// handle redis error
if errRedisRoleScan != nil {
return userAuthorizationsRedis, errRedisRoleScan
// get actions for each role and entity: SMEMBERS <entity>/<reference>/roles/<role>
var actions []string
roleList := strings.Split(roles, ",")
for _, r := range roleList {
r = strings.TrimSpace(r)
if r == "" {
continue
}
a, errRedisRoleScan := redisConnection.SMembers(ctx, pathScan+r).Result()
if errRedisRoleScan != nil {
return userAuthorizationsRedis, errRedisRoleScan
}
// duplicate values are allowed, since the Authorizator checks only for existence
actions = append(actions, a...)
}

// compose user authorizations
userAuthorizationsRedis.Username = username
userAuthorizationsRedis.Role = role
userAuthorizationsRedis.Role = roles // keep original string of raw role list
userAuthorizationsRedis.Actions = actions
Comment on lines +118 to 145
Copy link

Copilot AI Jan 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The implementation has a critical issue with how multiple roles are stored and retrieved. The current approach doesn't work correctly for either of the two possible syntaxes:

Syntax 1: org.nethserver.authorizations = node:fwadm,portsadm (comma-separated roles in a single authorization)

  • The alter_user function tries to find a role definition at node/1/roles/fwadm,portsadm, which doesn't exist since the compound role definitions were removed.
  • Result: No permissions are granted.

Syntax 2: org.nethserver.authorizations = node:fwadm node:portsadm (space-separated authorizations)

  • Each authorization calls alter_user separately with individual roles
  • Each call executes HSET roles/module/X node/1 <role>, which overwrites the previous value
  • Result: Only the last role is stored.

The alter_user function in core/imageroot/usr/local/agent/pypkg/cluster/grants.py needs to be updated to:

  1. Split comma-separated roles and process each individually
  2. When storing multiple roles for the same agent, either concatenate them with commas before the HSET, or accumulate all roles first and store once

Without this fix, modules cannot actually be granted multiple roles on the same target.

Copilot uses AI. Check for mistakes.

// close redis connection
Expand Down
31 changes: 23 additions & 8 deletions core/imageroot/usr/local/agent/pypkg/cluster/grants.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,19 +84,29 @@ def alter_user(rdb, user, revoke, role, on_clause):
"""
Grant/Revoke the ROLE ON some module to USER

The module can be cluster, node/X, module/modY etc.
The ROLE must be already defined in the given module.
The module matcher, specified by ON_CLAUSE, can be cluster, node/X,
module/modY etc. Wildcard match is allowed.

ROLE is a comma-separated list of role names without spaces. Each role
name must already be defined in the matching module(s).
"""

targets = set()
for xrole in role.split(","):
# Make sure there is no white space around the role name:
xrole = xrole.strip()
if not xrole:
continue
for key in rdb.scan_iter(f'{on_clause}/roles/{xrole}'):
agent_id = key.removesuffix(f'/roles/{xrole}')
targets.add(agent_id)

pipe = rdb.pipeline()
for key in rdb.scan_iter(f'{on_clause}/roles/{role}'):
pos = key.find(f'/roles/{role}')
agent_id = key[:pos]
for agent_id in targets:
if revoke:
pipe.hdel(f'roles/{user}', agent_id, role)
pipe.hdel(f'roles/{user}', agent_id)
else:
pipe.hset(f'roles/{user}', agent_id, role)

pipe.execute()

def refresh_permissions(rdb):
Expand All @@ -110,7 +120,8 @@ def refresh_permissions(rdb):
def add_module_permissions(rdb, module_id, authorizations, node_id):
"""Parse authorizations and grant permissions to module_id"""
for authz in authorizations:
xagent, xrole = authz.split(':') # e.g. traefik@node:routeadm
# authz examples: "traefik@node:routeadm", "node:fwadm,portsadm"
xagent, xrole = authz.split(':')

if xagent.endswith("@any"):
agent_selector = 'module/' + xagent.removesuffix("@any") + '*' # wildcard allowed in on_clause
Expand All @@ -119,6 +130,10 @@ def add_module_permissions(rdb, module_id, authorizations, node_id):
else:
agent_selector = agent.resolve_agent_id(xagent, node_id=node_id)

# If agent_selector resolves to the same agent more than once, the
# last alter_user() call wins: roles are not accumulated or merged
# across calls. However, passing a comma-separated list of roles as
# the xrole value is still allowed.
alter_user(rdb,
user=f'module/{module_id}',
revoke=False,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -186,32 +186,16 @@ cluster.grants.grant(rdb, "add-custom-zone", f'node/{node_id}', "fwadm")
cluster.grants.grant(rdb, "remove-custom-zone", f'node/{node_id}', "fwadm")
cluster.grants.grant(rdb, "add-rich-rules", f'node/{node_id}', "fwadm")
cluster.grants.grant(rdb, "remove-rich-rules", f'node/{node_id}', "fwadm")

cluster.grants.grant(rdb, "add-tun", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "remove-tun", f'node/{node_id}', "tunadm")
Comment on lines 189 to 190
Copy link

Copilot AI Jan 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The removal of firewall-related grants from the tunadm role (add-public-service, remove-public-service, add-custom-zone, remove-custom-zone) conflicts with the existing documentation at docs/core/tun.md line 21, which states "the tunadm authorization includes also the fwadm one."

If this is an intentional change to make tunadm independent from fwadm (requiring users to use "node:tunadm,fwadm" instead), the documentation should be updated to reflect this. Alternatively, if tunadm should continue to include fwadm permissions, these grants should be restored to the tunadm role.

Copilot uses AI. Check for mistakes.
cluster.grants.grant(rdb, "add-public-service", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "remove-public-service", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "add-custom-zone", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "remove-custom-zone", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "add-tun", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "remove-tun", f'node/{node_id}', "tunadm")

cluster.grants.grant(rdb, "add-rich-rules", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "remove-rich-rules", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "allocate-ports", f'node/{node_id}', "portsadm")
cluster.grants.grant(rdb, "deallocate-ports", f'node/{node_id}', "portsadm")
cluster.grants.grant(rdb, "allocate-ports", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "deallocate-ports", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "add-public-service", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "remove-public-service", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "add-custom-zone", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "remove-custom-zone", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "add-rich-rules", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "remove-rich-rules", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "allocate-ports", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "deallocate-ports", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "add-tun", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "remove-tun", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "add-public-service", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "remove-public-service", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "add-custom-zone", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "remove-custom-zone", f'node/{node_id}', "tunadm,portsadm")

# Grant on cascade the owner role on the new node, to users with the owner
# role on cluster
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,26 +19,20 @@ cluster.grants.grant(rdb, action_clause="bind-user-domains", to_clause="account
cluster.grants.grant(rdb, action_clause="bind-user-domains", to_clause="accountprovider", on_clause='cluster')
cluster.grants.grant(rdb, action_clause="list-modules", to_clause="accountprovider", on_clause='cluster')

#
# Reuse and reallocate TCP/UDP port range #6974
#
for node_id in set(rdb.hvals('cluster/module_node')):
# Reuse and reallocate TCP/UDP port range #6974:
cluster.grants.grant(rdb, "allocate-ports", f'node/{node_id}', "portsadm")
cluster.grants.grant(rdb, "deallocate-ports", f'node/{node_id}', "portsadm")
cluster.grants.grant(rdb, "allocate-ports", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "deallocate-ports", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "add-public-service", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "remove-public-service", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "add-custom-zone", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "remove-custom-zone", f'node/{node_id}', "fwadm,portsadm")
cluster.grants.grant(rdb, "allocate-ports", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "deallocate-ports", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "add-tun", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "remove-tun", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "add-public-service", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "remove-public-service", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "add-custom-zone", f'node/{node_id}', "tunadm,portsadm")
cluster.grants.grant(rdb, "remove-custom-zone", f'node/{node_id}', "tunadm,portsadm")
# Fix rich rules management #7836:
cluster.grants.grant(rdb, "add-rich-rules", f'node/{node_id}', "fwadm")
cluster.grants.grant(rdb, "remove-rich-rules", f'node/{node_id}', "fwadm")
cluster.grants.grant(rdb, "add-rich-rules", f'node/{node_id}', "tunadm")
cluster.grants.grant(rdb, "remove-rich-rules", f'node/{node_id}', "tunadm")
rdb.delete(
f'node/{node_id}/roles/fwadm,portsadm',
f'node/{node_id}/roles/tunadm,portsadm',
)

#
# END of grant updates
#
Expand Down
23 changes: 4 additions & 19 deletions core/imageroot/var/lib/nethserver/node/install-finalize.sh
Original file line number Diff line number Diff line change
Expand Up @@ -121,33 +121,18 @@ cluster.grants.grant(rdb, action_clause="add-public-service", to_clause="fwadm"
cluster.grants.grant(rdb, action_clause="remove-public-service", to_clause="fwadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-custom-zone", to_clause="fwadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-custom-zone", to_clause="fwadm", on_clause='node/1')

cluster.grants.grant(rdb, action_clause="add-rich-rules", to_clause="fwadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-rich-rules", to_clause="fwadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-tun", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-tun", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-public-service", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-public-service", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-custom-zone", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-custom-zone", to_clause="tunadm", on_clause='node/1')

cluster.grants.grant(rdb, action_clause="add-rich-rules", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-rich-rules", to_clause="tunadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="allocate-ports", to_clause="portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="deallocate-ports", to_clause="portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="allocate-ports", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="deallocate-ports", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-public-service", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-public-service", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-custom-zone", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-custom-zone", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="allocate-ports", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="deallocate-ports", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-tun", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-tun", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-public-service", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-public-service", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-custom-zone", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-custom-zone", to_clause="tunadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="add-rich-rules", to_clause="fwadm,portsadm", on_clause='node/1')
cluster.grants.grant(rdb, action_clause="remove-rich-rules", to_clause="fwadm,portsadm", on_clause='node/1')

cluster.grants.grant(rdb, action_clause="update-routes", to_clause="accountprovider", on_clause='cluster')
cluster.grants.grant(rdb, action_clause="bind-user-domains", to_clause="accountconsumer", on_clause='cluster')
cluster.grants.grant(rdb, action_clause="bind-user-domains", to_clause="accountprovider", on_clause='cluster')
Expand Down
3 changes: 2 additions & 1 deletion docs/core/tun.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@ be authorized to use it, by adding `node:tunadm` to the module image label

org.nethserver.authorizations=node:tunadm

Please note that the _tunadm_ authorization includes also the [_fwadm_](../firewall) one.
Please note that, for backward compatibility with core 3.16, the _tunadm_
authorization also includes the [_fwadm_](../firewall) one.

Then the module actions must use the `agent`
Python package to add/remove the tun device needed by the
Expand Down
3 changes: 0 additions & 3 deletions docs/modules/port_allocation.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,6 @@ with other existing node-related roles:
- `org.nethserver.authorizations = node:fwadm,portsadm`
- `org.nethserver.authorizations = node:tunadm,portsadm`

Note that the value must be exactly one of the above. Other combinations
like `node:portsadm,fwadm` are not valid.

The module will be granted execution permissions for the following actions
on the local node:
- `allocate-ports`
Expand Down
Loading