
Commit c6f31b7

Merge pull request #291 from Ratio1/develop
Develop
2 parents: 758f2bd + f4451a4

File tree

8 files changed: +1317 −247 lines changed

extensions/business/container_apps/container_app_runner.py

Lines changed: 714 additions & 84 deletions
Large diffs are not rendered by default.

extensions/business/container_apps/container_utils.py

Lines changed: 137 additions & 0 deletions
@@ -717,3 +717,140 @@ def _validate_docker_image_format(self, image_name):
     return True
 
   ### END COMMON CONTAINER UTILITY METHODS ###
+
+  ### EXTRA TUNNELS METHODS ###
+
+  def _normalize_extra_tunnel_config(self, container_port, config):
+    """
+    Normalize extra tunnel config to standard format.
+
+    Currently supports simple string format: "token"
+    Named "tunnel config" for future extensibility (e.g., dict with engine, enabled, etc.)
+
+    Args:
+      container_port: Container port (int)
+      config: Token string (future: could be dict with {"token": str, "engine": str, "enabled": bool})
+
+    Returns:
+      str: Normalized token string (future: could return dict)
+
+    Note:
+      This method is designed to be extended in the future to support more complex
+      tunnel configurations beyond simple token strings, such as:
+      - Different tunnel engines (cloudflare, ngrok, custom)
+      - Per-tunnel enabled/disabled flags
+      - Custom tunnel parameters
+      For now, it only handles string tokens for simplicity.
+    """
+    if isinstance(config, str):
+      return config.strip()
+    else:
+      raise ValueError(
+        f"EXTRA_TUNNELS[{container_port}] must be a string token, got {type(config)}"
+      )
+
+  def _allocate_extra_tunnel_ports(self, container_ports):
+    """
+    Allocate host ports for container ports defined only in EXTRA_TUNNELS.
+
+    This handles the case where CONTAINER_RESOURCES["ports"] is empty but
+    EXTRA_TUNNELS defines ports that need to be exposed.
+
+    Args:
+      container_ports: List of container ports to allocate
+    """
+    self.P(f"Allocating host ports for {len(container_ports)} EXTRA_TUNNELS ports...", color='b')
+
+    for container_port in container_ports:
+      # Check if already allocated (shouldn't be, but safety check)
+      if container_port in self.extra_ports_mapping.values():
+        self.Pd(f"Port {container_port} already allocated, skipping")
+        continue
+
+      # Allocate dynamic host port
+      host_port = self._allocate_port(allow_dynamic=True)
+      self.extra_ports_mapping[host_port] = container_port
+
+      self.P(f"  Allocated host port {host_port} -> container port {container_port}", color='g')
+
+      # Special handling if this is the main PORT
+      if self.cfg_port == container_port and not self.port:
+        self.port = host_port
+        self.P(f"  Main PORT {container_port} mapped to host port {host_port}", color='g')
+
+    # Rebuild inverted_ports_mapping for Docker
+    self.inverted_ports_mapping = {
+      f"{container_port}/tcp": str(host_port)
+      for host_port, container_port in self.extra_ports_mapping.items()
+    }
+
+    self.P(f"Port allocation complete. Total ports: {len(self.extra_ports_mapping)}", color='g')
+    return
+
+  def _validate_extra_tunnels_config(self):
+    """
+    Validate EXTRA_TUNNELS configuration.
+
+    Key behaviors:
+      1. If TUNNEL_ENGINE_ENABLED=False, EXTRA_TUNNELS are IGNORED
+      2. Container ports can be defined only in EXTRA_TUNNELS (not in CONTAINER_RESOURCES)
+      3. Ports from EXTRA_TUNNELS will be allocated dynamically if needed
+      4. Dict keys can be strings or integers
+
+    Returns:
+      bool: True if valid
+    """
+    # Master switch check
+    if not self.cfg_tunnel_engine_enabled:
+      if self.cfg_extra_tunnels:
+        self.P(
+          f"TUNNEL_ENGINE_ENABLED=False: Ignoring {len(self.cfg_extra_tunnels)} EXTRA_TUNNELS",
+          color='y'
+        )
+      return True
+
+    if not self.cfg_extra_tunnels:
+      self.Pd("No EXTRA_TUNNELS configured")
+      return True
+
+    if not isinstance(self.cfg_extra_tunnels, dict):
+      raise ValueError("EXTRA_TUNNELS must be a dictionary {container_port: token}")
+
+    # Track which ports need to be allocated
+    ports_to_allocate = []
+
+    for port_key, tunnel_config in self.cfg_extra_tunnels.items():
+      # Convert port key to integer (handle both string and int keys)
+      try:
+        container_port = int(port_key)
+      except (ValueError, TypeError):
+        raise ValueError(f"EXTRA_TUNNELS key must be integer port, got: {port_key}")
+
+      # Check if port is already allocated
+      is_already_mapped = container_port in self.extra_ports_mapping.values()
+
+      if not is_already_mapped:
+        # Port not in CONTAINER_RESOURCES["ports"], will need to allocate
+        self.Pd(
+          f"EXTRA_TUNNELS port {container_port} not in CONTAINER_RESOURCES['ports'], "
+          f"will allocate dynamically"
+        )
+        ports_to_allocate.append(container_port)
+
+      # Normalize and validate tunnel config
+      try:
+        normalized = self._normalize_extra_tunnel_config(container_port, tunnel_config)
+        if not normalized:
+          raise ValueError(f"EXTRA_TUNNELS[{container_port}] token is empty")
+        self.extra_tunnel_configs[container_port] = normalized
+      except Exception as e:
+        raise ValueError(f"EXTRA_TUNNELS[{container_port}] validation failed: {e}")
+
+    # Allocate ports for EXTRA_TUNNELS not in CONTAINER_RESOURCES
+    if ports_to_allocate:
+      self._allocate_extra_tunnel_ports(ports_to_allocate)
+
+    self.P(f"EXTRA_TUNNELS validated: {len(self.extra_tunnel_configs)} tunnel(s) configured", color='g')
+    return True
+
+  ### END EXTRA TUNNELS METHODS ###
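
For reviewers, a minimal sketch of how the new methods compose, assuming a plugin instance configured roughly as follows (the surrounding config keys and token values are illustrative, not taken from this commit):

# Hypothetical instance config: two extra tunnels keyed by container port.
# Keys may be str or int; values must be non-empty token strings.
instance_config = {
  "TUNNEL_ENGINE_ENABLED": True,          # master switch; when False, EXTRA_TUNNELS is ignored
  "EXTRA_TUNNELS": {
    "8080": "example-token-dashboard",    # string key, coerced to int
    9090: "example-token-metrics",        # integer key accepted directly
  },
}

# Expected flow in the runner (names from the diff above):
# _validate_extra_tunnels_config()
#   -> coerces keys to int and normalizes tokens via _normalize_extra_tunnel_config()
#   -> ports missing from CONTAINER_RESOURCES["ports"] go to _allocate_extra_tunnel_ports(),
#      which picks dynamic host ports and rebuilds inverted_ports_mapping
#      as {"<container_port>/tcp": "<host_port>"} for Docker.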

extensions/business/deeploy/deeploy_const.py

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@ class DEEPLOY_PLUGIN_DATA:
 
   CONTAINER_APP_RUNNER_SIGNATURE = 'CONTAINER_APP_RUNNER'
   WORKER_APP_RUNNER_SIGNATURE = 'WORKER_APP_RUNNER'
-
+  CONTAINERIZED_APPS_SIGNATURES = [CONTAINER_APP_RUNNER_SIGNATURE, WORKER_APP_RUNNER_SIGNATURE]
 
 class JOB_APP_TYPES:
   GENERIC = "generic"
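
The new constant presumably lets call sites test signature membership once instead of comparing against each runner signature separately; a hypothetical usage sketch (not part of this commit, and the import path simply mirrors the file location):

# Hypothetical call site:
from extensions.business.deeploy.deeploy_const import DEEPLOY_PLUGIN_DATA

def is_containerized_app(signature: str) -> bool:
  # True for both CONTAINER_APP_RUNNER and WORKER_APP_RUNNER plugins.
  return signature in DEEPLOY_PLUGIN_DATA.CONTAINERIZED_APPS_SIGNATURES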

extensions/business/deeploy/deeploy_manager_api.py

Lines changed: 77 additions & 44 deletions
@@ -208,58 +208,90 @@ def _process_pipeline_request(
 
     # Get nodes based on operation type
     discovered_plugin_instances = []
+    deployment_nodes = []
+    confirmation_nodes = []
+    deeploy_specs_for_update = None
     if is_create:
-      nodes = self._check_nodes_availability(inputs)
+      deployment_nodes = self._check_nodes_availability(inputs)
+      confirmation_nodes = list(deployment_nodes)
     else:
-      discovered_plugin_instances = self._discover_plugin_instances(app_id=app_id, job_id=job_id, owner=sender)
-
+      # Discover the live deployment so we can validate node affinity and reuse existing specs.
+      pipeline_context = self._gather_running_pipeline_context(
+        owner=sender,
+        app_id=app_id,
+        job_id=job_id,
+      )
+      discovered_plugin_instances = pipeline_context["discovered_instances"]
+      current_nodes = pipeline_context["nodes"]
+      deeploy_specs_for_update = pipeline_context["deeploy_specs"]
       self.P(f"Discovered plugin instances: {self.json_dumps(discovered_plugin_instances)}")
-      deeploy_specs_for_update = None
-      if job_app_type in (JOB_APP_TYPES.NATIVE, JOB_APP_TYPES.GENERIC, JOB_APP_TYPES.SERVICE):
-        discovered_plugin_instances = self._ensure_plugin_instance_ids(
-          inputs=inputs,
-          discovered_plugin_instances=discovered_plugin_instances,
-          owner=sender,
-          app_id=app_id,
-          job_id=job_id,
-        )
-        deeploy_specs_for_update = self._prepare_updated_deeploy_specs(
-          owner=sender,
-          app_id=app_id,
-          job_id=job_id,
-          discovered_plugin_instances=discovered_plugin_instances,
+
+      requested_nodes = inputs.get(DEEPLOY_KEYS.TARGET_NODES, None) or []
+      normalized_requested_nodes = [
+        self._check_and_maybe_convert_address(node) for node in requested_nodes
+      ] if requested_nodes else []
+
+      if normalized_requested_nodes:
+        # Reject updates that request a different node set than the one currently running.
+        if set(normalized_requested_nodes) != set(current_nodes):
+          msg = (
+            f"{DEEPLOY_ERRORS.NODES2}: Update request must target existing nodes {current_nodes}. "
+            f"Received {normalized_requested_nodes}."
+          )
+          raise ValueError(msg)
+
+      requested_nodes_count = inputs.get(DEEPLOY_KEYS.TARGET_NODES_COUNT, 0)
+      if requested_nodes_count and requested_nodes_count != len(current_nodes):
+        msg = (
+          f"{DEEPLOY_ERRORS.NODES2}: Update request must keep the original number of nodes "
+          f"({len(current_nodes)}). Received {requested_nodes_count}."
         )
-      nodes = [instance[DEEPLOY_PLUGIN_DATA.NODE] for instance in discovered_plugin_instances]
+        raise ValueError(msg)
 
-    if is_create:
-      dct_status, str_status = self.check_and_deploy_pipelines(
-        sender=sender,
-        inputs=inputs,
-        app_id=app_id,
-        app_alias=app_alias,
-        app_type=app_type,
-        new_nodes=nodes,
-        update_nodes=[],
-        discovered_plugin_instances=discovered_plugin_instances,
-        job_app_type=job_app_type,
-      )
-    else:
-      dct_status, str_status = self.check_and_deploy_pipelines(
-        sender=sender,
-        inputs=inputs,
+      inputs[DEEPLOY_KEYS.TARGET_NODES] = current_nodes
+      inputs.target_nodes = current_nodes
+      inputs[DEEPLOY_KEYS.TARGET_NODES_COUNT] = len(current_nodes)
+      inputs.target_nodes_count = len(current_nodes)
+
+      # TODO: Assess whether removing the running pipeline before redeploying is safe when the new launch fails.
+      self.delete_pipeline_from_nodes(
         app_id=app_id,
-        app_alias=app_alias,
-        app_type=app_type,
-        new_nodes=[],
-        update_nodes=nodes,
-        discovered_plugin_instances=discovered_plugin_instances,
-        dct_deeploy_specs=deeploy_specs_for_update,
-        job_app_type=job_app_type,
+        job_id=job_id,
+        owner=sender,
+        discovered_instances=discovered_plugin_instances,
       )
+
+      deployment_nodes = self._check_nodes_availability(inputs)
+      if set(deployment_nodes) != set(current_nodes):
+        msg = (
+          f"{DEEPLOY_ERRORS.NODES2}: Failed to validate that update runs on existing nodes. "
+          f"Expected {current_nodes}, validated {deployment_nodes}."
+        )
+        raise ValueError(msg)
+      confirmation_nodes = list(deployment_nodes)
+      discovered_plugin_instances = []
+
+      inputs[DEEPLOY_KEYS.TARGET_NODES] = deployment_nodes
+      inputs.target_nodes = deployment_nodes
+      inputs[DEEPLOY_KEYS.TARGET_NODES_COUNT] = len(deployment_nodes)
+      inputs.target_nodes_count = len(deployment_nodes)
+
+    dct_status, str_status = self.check_and_deploy_pipelines(
+      sender=sender,
+      inputs=inputs,
+      app_id=app_id,
+      app_alias=app_alias,
+      app_type=app_type,
+      new_nodes=deployment_nodes,
+      update_nodes=[],
+      discovered_plugin_instances=discovered_plugin_instances,
+      dct_deeploy_specs_create=deeploy_specs_for_update,
+      job_app_type=job_app_type,
+    )
 
-    if str_status in [DEEPLOY_STATUS.SUCCESS, DEEPLOY_STATUS.COMMAND_DELIVERED]:
-      if (dct_status is not None and is_confirmable_job and len(nodes) == len(dct_status)) or not is_confirmable_job:
-        eth_nodes = [self.bc.node_addr_to_eth_addr(node) for node in nodes]
+    if is_create and str_status in [DEEPLOY_STATUS.SUCCESS, DEEPLOY_STATUS.COMMAND_DELIVERED]:
+      if (dct_status is not None and is_confirmable_job and len(confirmation_nodes) == len(dct_status)) or not is_confirmable_job:
+        eth_nodes = [self.bc.node_addr_to_eth_addr(node) for node in confirmation_nodes]
         eth_nodes = sorted(eth_nodes)
         self.bc.submit_node_update(
           job_id=job_id,
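
The update path above hinges on the context returned by _gather_running_pipeline_context, a helper introduced elsewhere in this PR; the value types in the sketch below are inferred from how the keys are consumed here, not from that helper's source:

# Inferred shape of pipeline_context as consumed above:
pipeline_context = {
  "discovered_instances": [],  # live plugin instances for this app_id/job_id/owner
  "nodes": [],                 # addresses of the nodes currently running the pipeline
  "deeploy_specs": {},         # existing specs, forwarded as dct_deeploy_specs_create
}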
@@ -483,6 +515,7 @@ def update_pipeline(
 
     Notes
     -----
+    - Existing pipelines are stopped and redeployed in place; requests must reference the active node set.
     - Updates are applied to existing plugin instances on the same nodes
     - For multi-plugin pipelines, all plugins are updated with new configurations
     - Resource validation applies the same as create operations
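
To illustrate the new node-affinity rule with a hypothetical update request (the target_nodes and target_nodes_count fields follow DEEPLOY_KEYS.TARGET_NODES and TARGET_NODES_COUNT; the remaining payload fields are assumptions):

# If the app currently runs on nodes ["node_A", "node_B"], an update must either
# omit target_nodes or repeat the active set exactly:
update_request = {
  "app_id": "my_app",                     # hypothetical identifier
  "target_nodes": ["node_A", "node_B"],   # must equal the running node set
  "target_nodes_count": 2,                # must equal len(current_nodes)
}
# Requesting, say, ["node_C"] instead would raise
# ValueError(f"{DEEPLOY_ERRORS.NODES2}: Update request must target existing nodes ...")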
