6363
6464######################################################################
6565# Default outputs
66- # --------------
66+ # ----------------
6767#
6868# The default output of the function is `result`. The `PythonJob` task
6969# will store the result as one node in the database with the key `result`.
7070#
7171from aiida import load_profile
7272from aiida .engine import run_get_node
73- from aiida_pythonjob import PythonJob , prepare_pythonjob_inputs
73+ from aiida_pythonjob import PythonJob , prepare_pythonjob_inputs , spec
7474
7575load_profile ()
7676
@@ -91,7 +91,7 @@ def add(x, y):
9191# Custom outputs
9292# --------------
9393# If the function returns a dictionary with a fixed number of keys, and you
94- # want to store the values as separate outputs, you can specify the `output_ports ` parameter.
94+ # want to store the values as separate outputs, you can specify the `outputs_spec ` parameter.
9595# For a dynamic number of outputs, you can use the namespace output, which is explained later.
9696#
9797
@@ -103,10 +103,7 @@ def add(x, y):
103103inputs = prepare_pythonjob_inputs (
104104 add ,
105105 function_inputs = {"x" : 1 , "y" : 2 },
106- output_ports = [
107- {"name" : "sum" },
108- {"name" : "diff" },
109- ],
106+ outputs_spec = spec .namespace (sum = any , diff = any ),
110107)
111108result , node = run_get_node (PythonJob , ** inputs )
112109
@@ -117,7 +114,7 @@ def add(x, y):
117114
118115######################################################################
119116# Using parent folder
120- # --------------
117+ # -----------------------
121118# The parent_folder parameter allows a task to access the output files of
122119# a parent task. This feature is particularly useful when you want to reuse
123120# data generated by a previous computation in subsequent computations. In
@@ -142,15 +139,13 @@ def multiply(x, y):
142139inputs1 = prepare_pythonjob_inputs (
143140 add ,
144141 function_inputs = {"x" : 1 , "y" : 2 },
145- output_ports = [{"name" : "sum" }],
146142)
147143
148144result1 , node1 = run_get_node (PythonJob , inputs = inputs1 )
149145
150146inputs2 = prepare_pythonjob_inputs (
151147 multiply ,
152148 function_inputs = {"x" : 1 , "y" : 2 },
153- output_ports = [{"name" : "product" }],
154149 parent_folder = result1 ["remote_folder" ],
155150)
156151
@@ -160,7 +155,7 @@ def multiply(x, y):
160155
161156######################################################################
162157# Upload files or folders to the remote computer
163- # --------------
158+ # -------------------------------------------------
164159# The `upload_files` parameter allows users to upload files or folders to
165160# the remote computer. The files will be uploaded to the working directory of the remote computer.
166161#
@@ -202,7 +197,7 @@ def add():
202197
203198######################################################################
204199# Retrieve additional files from the remote computer
205- # --------------
200+ # ----------------------------------------------------
206201# Sometimes, one may want to retrieve additional files from the remote
207202# computer after the job has finished. For example, one may want to retrieve
208203# the output files generated by the `pw.x` calculation in Quantum ESPRESSO.
@@ -235,7 +230,7 @@ def add(x, y):
235230
236231######################################################################
237232# Namespace Output
238- # --------------
233+ # ------------------
239234#
240235# The `PythonJob` allows users to define namespace outputs. A namespace output
241236# is a dictionary with keys and values returned by a function. Each value in
@@ -264,18 +259,18 @@ def generate_structures(structure: Atoms, factor_lst: list) -> dict:
264259 atoms = structure .copy ()
265260 atoms .set_cell (atoms .cell * factor_lst [i ], scale_atoms = True )
266261 scaled_structures [f"s_{ i } " ] = atoms
267- return { " scaled_structures" : scaled_structures }
262+ return scaled_structures
268263
269264
270265inputs = prepare_pythonjob_inputs (
271266 generate_structures ,
272267 function_inputs = {"structure" : bulk ("Al" ), "factor_lst" : [0.95 , 1.0 , 1.05 ]},
273- output_ports = [{ "name" : "scaled_structures" , "identifier" : "namespace" }] ,
268+ outputs_spec = spec . dynamic ( Atoms ) ,
274269)
275270
276271result , node = run_get_node (PythonJob , inputs = inputs )
277272print ("scaled_structures: " )
278- for key , value in result [ "scaled_structures" ] .items ():
273+ for key , value in result .items ():
279274 print (key , value )
280275
281276
@@ -297,31 +292,20 @@ def generate_structures(structure: Atoms, factor_lst: list) -> dict:
297292 scaled_structures [f"s_{ i } " ] = atoms
298293 volumes [f"v_{ i } " ] = atoms .get_volume ()
299294 return {
300- "outputs" : {
301- "scaled_structures" : scaled_structures ,
302- "volume" : volumes ,
303- }
295+ "scaled_structures" : scaled_structures ,
296+ "volume" : volumes ,
304297 }
305298
306299
307300inputs = prepare_pythonjob_inputs (
308301 generate_structures ,
309302 function_inputs = {"structure" : bulk ("Al" ), "factor_lst" : [0.95 , 1.0 , 1.05 ]},
310- output_ports = [
311- {
312- "name" : "outputs" ,
313- "identifier" : "namespace" ,
314- "ports" : [
315- {"name" : "scaled_structures" , "identifier" : "namespace" },
316- {"name" : "volume" , "identifier" : "namespace" },
317- ],
318- }
319- ],
303+ outputs_spec = spec .namespace (scaled_structures = spec .dynamic (Atoms ), volume = spec .dynamic (float )),
320304)
321305
322306result , node = run_get_node (PythonJob , inputs = inputs )
323- print ("result: " , result ["outputs" ][ " scaled_structures" ])
324- print ("volumes: " , result ["outputs" ][ " volume" ])
307+ print ("result: " , result ["scaled_structures" ])
308+ print ("volumes: " , result ["volume" ])
325309
326310
327311######################################################################
@@ -420,7 +404,7 @@ def add(x, y):
420404
421405######################################################################
422406# Define your data serializer and deserializer
423- # --------------
407+ # ----------------------------------------------
424408#
425409# PythonJob searches for a data serializer from the `aiida.data` entry point by the
426410# module name and class name (e.g., `ase.atoms.Atoms`).
0 commit comments