Skip to content

Commit f7b887a

Browse files
authored
Merge pull request #3211 from PrincetonUniversity/devel
Devel
2 parents 5c8168f + beebd2a commit f7b887a

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

56 files changed

+2350
-1272
lines changed

.github/actions/install-pnl/action.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ runs:
103103
pip install setuptools wheel
104104
python setup.py sdist bdist_wheel
105105
echo "wheel=$(ls dist/*.whl)" | tee -a "$GITHUB_OUTPUT"
106-
echo "sdist=$(ls dist/*.sdist)" | tee -a "$GITHUB_OUTPUT"
106+
echo "sdist=$(ls dist/*.tar.gz)" | tee -a "$GITHUB_OUTPUT"
107107
108108
- name: Python dependencies
109109
shell: bash

conftest.py

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -71,8 +71,7 @@ def pytest_runtest_setup(item):
7171
def pytest_generate_tests(metafunc):
7272
mech_and_func_modes = ['Python',
7373
pytest.param('LLVM', marks=pytest.mark.llvm),
74-
pytest.param('PTX', marks=[pytest.mark.llvm,
75-
pytest.mark.cuda])
74+
pytest.param('PTX', marks=[pytest.mark.llvm, pytest.mark.cuda])
7675
]
7776

7877
if "func_mode" in metafunc.fixturenames:
@@ -81,9 +80,9 @@ def pytest_generate_tests(metafunc):
8180
if "mech_mode" in metafunc.fixturenames:
8281
metafunc.parametrize("mech_mode", mech_and_func_modes)
8382

84-
if "comp_mode_no_llvm" in metafunc.fixturenames:
83+
if "comp_mode_no_per_node" in metafunc.fixturenames:
8584
modes = [m for m in get_comp_execution_modes()
86-
if m.values[0] is not pnlvm.ExecutionMode.LLVM]
85+
if m.values[0] is not pnlvm.ExecutionMode._LLVMPerNode]
8786
metafunc.parametrize("comp_mode", modes)
8887

8988
elif "comp_mode" in metafunc.fixturenames:
@@ -151,7 +150,7 @@ def pytest_runtest_teardown(item):
151150
pnlvm.cleanup("llvm" in item.keywords and not skip_cleanup_check)
152151

153152
@pytest.fixture
154-
def comp_mode_no_llvm():
153+
def comp_mode_no_per_node():
155154
# dummy fixture to allow 'comp_mode' filtering
156155
pass
157156

@@ -187,8 +186,8 @@ def llvm_current_fp_precision():
187186
@pytest.helpers.register
188187
def get_comp_execution_modes():
189188
return [pytest.param(pnlvm.ExecutionMode.Python),
190-
pytest.param(pnlvm.ExecutionMode.LLVM, marks=pytest.mark.llvm),
191-
pytest.param(pnlvm.ExecutionMode.LLVMExec, marks=pytest.mark.llvm),
189+
pytest.param(pnlvm.ExecutionMode._LLVMPerNode, marks=pytest.mark.llvm),
190+
pytest.param(pnlvm.ExecutionMode._LLVMExec, marks=pytest.mark.llvm),
192191
pytest.param(pnlvm.ExecutionMode.LLVMRun, marks=pytest.mark.llvm),
193192
pytest.param(pnlvm.ExecutionMode.PTXRun, marks=[pytest.mark.llvm, pytest.mark.cuda])
194193
]
@@ -208,29 +207,29 @@ def cuda_param(val):
208207
return pytest.param(val, marks=[pytest.mark.llvm, pytest.mark.cuda])
209208

210209
@pytest.helpers.register
211-
def get_func_execution(func, func_mode):
210+
def get_func_execution(func, func_mode, *, tags:frozenset=frozenset(), member='function'):
212211
if func_mode == 'LLVM':
213-
return pnlvm.execution.FuncExecution(func).execute
212+
return pnlvm.execution.FuncExecution(func, tags=tags).execute
214213

215214
elif func_mode == 'PTX':
216-
return pnlvm.execution.FuncExecution(func).cuda_execute
215+
return pnlvm.execution.FuncExecution(func, tags=tags).cuda_execute
217216

218217
elif func_mode == 'Python':
219-
return func.function
218+
return getattr(func, member)
220219
else:
221220
assert False, "Unknown function mode: {}".format(func_mode)
222221

223222
@pytest.helpers.register
224-
def get_mech_execution(mech, mech_mode):
223+
def get_mech_execution(mech, mech_mode, *, tags:frozenset=frozenset(), member='execute'):
225224
if mech_mode == 'LLVM':
226-
return pnlvm.execution.MechExecution(mech).execute
225+
return pnlvm.execution.MechExecution(mech, tags=tags).execute
227226

228227
elif mech_mode == 'PTX':
229-
return pnlvm.execution.MechExecution(mech).cuda_execute
228+
return pnlvm.execution.MechExecution(mech, tags=tags).cuda_execute
230229

231230
elif mech_mode == 'Python':
232231
def mech_wrapper(x):
233-
mech.execute(x)
232+
getattr(mech, member)(x)
234233
return mech.output_values
235234

236235
return mech_wrapper

cuda_requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
pycuda >2018, <2025
1+
pycuda >2018, <2026

docs/source/Compilation.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,8 @@ Use
3333
Compiled form of a model can be invoked by passing one of the following values to the `bin_execute` parameter of `Composition.run`, or `Composition.exec`:
3434

3535
* `ExecutionMode.Python`: Normal python execution
36-
* `ExecutionMode.LLVM`: Compile and execute individual nodes. The scheduling loop still runs in Python. If any of the nodes fails to compile, an error is raised. *NOTE:* Schedules that require access to node data will not work correctly.
37-
* `ExecutionMode.LLVMExec`: Execution of `Composition.exec` is replaced by a compiled equivalent. If the `Composition` fails to compile, an error is raised.
36+
* `ExecutionMode._LLVMPerNode`: Compile and execute individual nodes. The scheduling loop still runs in Python. If any of the nodes fails to compile, an error is raised. *NOTE:* Schedules that require access to node data will not work correctly.
37+
* `ExecutionMode._LLVMExec`: Execution of `Composition.exec` is replaced by a compiled equivalent. If the `Composition` fails to compile, an error is raised.
3838
* `ExecutionMode.LLVMRun`: Execution of `Composition.run` is replaced by a compiled equivalent. If the `Composition` fails to compile, an error is raised.
3939
* `ExecutionMode.Auto`: This option attempts all three above mentioned granularities, and gracefully falls back to lower granularity. Warnings are raised in place of errors. This is the recommended way to invoke compiled execution as the final fallback is the Python baseline.
4040

psyneulink/core/components/component.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1484,7 +1484,7 @@ def _get_compilation_params(self):
14841484
"control_signal", "competition",
14851485
"has_recurrent_input_port", "enable_learning",
14861486
"enable_output_type_conversion", "changes_shape",
1487-
"output_type", "bounds", "internal_only",
1487+
"output_type", "range", "internal_only",
14881488
"require_projection_in_composition", "default_input",
14891489
"shadow_inputs", "compute_reconfiguration_cost",
14901490
"reconfiguration_cost", "net_outcome", "outcome",

psyneulink/core/components/functions/function.py

Lines changed: 28 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -171,9 +171,22 @@
171171
from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel
172172
from psyneulink.core.globals.registry import register_category
173173
from psyneulink.core.globals.utilities import (
174-
convert_all_elements_to_np_array, convert_to_np_array, get_global_seed, is_instance_or_subclass, object_has_single_value, parameter_spec, parse_valid_identifier, safe_len,
175-
SeededRandomState, try_extract_0d_array_item, contains_type, is_numeric, NumericCollections,
176-
random_matrix, array_from_matrix_string
174+
NumericCollections,
175+
SeededRandomState,
176+
_get_global_seed,
177+
array_from_matrix_string,
178+
contains_type,
179+
convert_all_elements_to_np_array,
180+
convert_to_np_array,
181+
is_instance_or_subclass,
182+
is_numeric,
183+
is_numeric_scalar,
184+
object_has_single_value,
185+
parameter_spec,
186+
parse_valid_identifier,
187+
random_matrix,
188+
safe_len,
189+
try_extract_0d_array_item,
177190
)
178191

179192
__all__ = [
@@ -357,7 +370,7 @@ def _seed_setter(value, owning_component, context, *, compilation_sync):
357370

358371
value = try_extract_0d_array_item(value)
359372
if value is None or value == DEFAULT_SEED():
360-
value = get_global_seed()
373+
value = _get_global_seed()
361374

362375
# Remove any old PRNG state
363376
owning_component.parameters.random_state.set(None, context=context)
@@ -438,7 +451,7 @@ class Function_Base(Function):
438451
prefs=None \
439452
)
440453
441-
Implement abstract class for Function category of Component class
454+
Abstract base class for Function category of Component class
442455
443456
COMMENT:
444457
Description:
@@ -1000,8 +1013,8 @@ def _get_pytorch_fct_param_value(self, param_name, device, context):
10001013

10011014

10021015
# ***************************************** EXAMPLE FUNCTION *******************************************************
1003-
PROPENSITY = "PROPENSITY"
1004-
PERTINACITY = "PERTINACITY"
1016+
PROPENSITY = "propensity"
1017+
PERTINACITY = "pertinacity"
10051018

10061019

10071020
class ArgumentTherapy(Function_Base):
@@ -1083,6 +1096,10 @@ class ArgumentTherapy(Function_Base):
10831096
REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
10841097
}
10851098

1099+
class Parameters(Function_Base.Parameters):
1100+
propensity = None
1101+
pertinacity = None
1102+
10861103
# Mode indicators
10871104
class Manner(Enum):
10881105
OBSEQUIOUS = 0
@@ -1095,16 +1112,16 @@ class Manner(Enum):
10951112
@check_user_specified
10961113
def __init__(self,
10971114
default_variable=None,
1098-
propensity=10.0,
1099-
pertincacity=Manner.CONTRARIAN,
1115+
propensity=Manner.CONTRARIAN,
1116+
pertinacity=10.0,
11001117
params=None,
11011118
owner=None,
11021119
prefs: Optional[ValidPrefSet] = None):
11031120

11041121
super().__init__(
11051122
default_variable=default_variable,
11061123
propensity=propensity,
1107-
pertinacity=pertincacity,
1124+
pertinacity=pertinacity,
11081125
params=params,
11091126
owner=owner,
11101127
prefs=prefs,
@@ -1155,7 +1172,7 @@ def _validate_params(self, request_set, target_set=None, context=None):
11551172

11561173
# Validate param
11571174
if param_name == PERTINACITY:
1158-
if isinstance(param_value, numbers.Number) and 0 <= param_value <= 10:
1175+
if is_numeric_scalar(param_value) and 0 <= param_value <= 10:
11591176
# target_set[PERTINACITY] = param_value
11601177
pass # This leaves param in request_set, clear to be assigned to target_set in call to super below
11611178
else:

psyneulink/core/components/functions/nonstateful/learningfunctions.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -506,6 +506,15 @@ def func(entry_to_store,
506506
decay_rate,
507507
random_state)->torch.tensor:
508508
"""Decay existing memories and replace weakest entry with entry_to_store (parallel EMStorage._function)"""
509+
510+
# If the batch_size is not equal to one then we need to raise an exception.
511+
if len(entry_to_store.shape) > 2:
512+
if entry_to_store.shape[0] != 1:
513+
raise NotImplementedError("EMSStorage has not been implemented for batch sizes greater than 1")
514+
else:
515+
# Drop the singleton batch dimension
516+
entry_to_store = entry_to_store[0]
517+
509518
if random_state.uniform(0, 1) < storage_prob:
510519
if decay_rate:
511520
memory_matrix *= torch.tensor(decay_rate)
@@ -515,9 +524,9 @@ def func(entry_to_store,
515524
# Find weakest entry (i.e., with lowest norm) along specified axis of matrix
516525
idx_of_min = torch.argmin(torch.linalg.norm(memory_matrix, axis=axis))
517526
if axis == 0:
518-
memory_matrix[:,idx_of_min] = entry_to_store
527+
memory_matrix[:,idx_of_min] = entry_to_store[0]
519528
elif axis == 1:
520-
memory_matrix[idx_of_min,:] = entry_to_store
529+
memory_matrix[idx_of_min,:] = entry_to_store[0]
521530
return memory_matrix
522531
return func
523532

psyneulink/core/components/functions/nonstateful/optimizationfunctions.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -641,7 +641,7 @@ def _evaluate(self, variable=None, context=None, params=None, fit_evaluate=False
641641
# Run compiled mode if requested by parameter and everything is initialized
642642
if self.owner and self.owner.parameters.comp_execution_mode._get(context) != 'Python' and \
643643
ContextFlags.PROCESSING in context.flags:
644-
all_samples = [s for s in itertools.product(*self.search_space)]
644+
all_samples = list(itertools.product(*self.search_space))
645645
all_values, num_evals = self._grid_evaluate(self.owner, context, fit_evaluate)
646646
assert len(all_values) == num_evals
647647
assert len(all_samples) == num_evals
@@ -846,7 +846,7 @@ def reset_grid(self, context):
846846
"""Reset iterators in `search_space <GridSearch.search_space>`"""
847847
for s in self.search_space:
848848
s.reset()
849-
self.parameters.grid._set(itertools.product(*[s for s in self.search_space]), context)
849+
self.parameters.grid._set((s for s in itertools.product(*[s for s in self.search_space])), context)
850850

851851
def _traverse_grid(self, variable, sample_num, context=None):
852852
"""Get next sample from grid.

psyneulink/core/components/functions/nonstateful/selectionfunctions.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -218,8 +218,6 @@ class OneHot(SelectionFunction):
218218
function. Values specified for parameters in the dictionary override any assigned to those parameters in
219219
arguments of the constructor.
220220
221-
bounds : None
222-
223221
owner : Component
224222
`component <Component>` to which to assign the Function.
225223

0 commit comments

Comments
 (0)