Commit 730e9d8

Merge branch 'devel' of https://github.com/PrincetonUniversity/PsyNeuLink into docs/composition_randomization

Conflicts:
    docs/source/Core.rst
    docs/source/Services.rst
    psyneulink/core/globals/utilities.py

2 parents: 41f9c53 + cbfc6f5

50 files changed: +1869 -793 lines

.github/workflows/pnl-ci.yml (+3 -1)

@@ -188,5 +188,7 @@ jobs:
       if: matrix.version-restrict == ''
       with:
         name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}
-        path: ${{ steps.install.outputs.wheel }} ${{ steps.install.outputs.sdist }}
+        path: |
+          ${{ steps.install.outputs.wheel }}
+          ${{ steps.install.outputs.sdist }}
       retention-days: 2

Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/EGO Model - Revaluation.py (+1 -1)

@@ -692,7 +692,7 @@ def control_function(variable, context=None, **kwargs):
 
 # Validate construction
 proj_from_retrieved_reward_to_control = control_layer.input_ports[1].path_afferents[0]
-assert proj_from_retrieved_reward_to_control._feedback == True
+assert proj_from_retrieved_reward_to_control.feedback == EdgeType.FEEDBACK
 assert proj_from_retrieved_reward_to_control in EGO_comp.feedback_projections  # retrieved_reward feedback
 assert context_layer.input_port.path_afferents[0].sender.owner == context_layer  # recurrent projection
 assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-context_integration_rate

Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py (+1 -1)

@@ -697,7 +697,7 @@ def control_function(variable, context=None, **kwargs):
 
 # Validate construction
 proj_from_retrieved_reward_to_control = control_layer.input_ports[1].path_afferents[0]
-assert proj_from_retrieved_reward_to_control._feedback == True
+assert proj_from_retrieved_reward_to_control.feedback == EdgeType.FEEDBACK
 assert proj_from_retrieved_reward_to_control in EGO_comp.feedback_projections  # retrieved_reward feedback
 assert context_layer.input_port.path_afferents[0].sender.owner == context_layer  # recurrent projection
 assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-context_integration_rate
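
Both EGO scripts get the same one-line fix: the assertion now checks the public `feedback` attribute against an `EdgeType` enum member instead of truth-testing the private `_feedback` boolean. A minimal sketch of why the enum comparison is the more precise check, assuming an `EdgeType` enum along the lines of the one in `psyneulink.core.globals.graph` (the member set shown here is illustrative):

    from enum import Enum

    class EdgeType(Enum):
        # Illustrative members; the real enum lives in psyneulink.core.globals.graph
        NON_FEEDBACK = 0
        FEEDBACK = 1
        FLEXIBLE = 2

    edge_feedback = EdgeType.FLEXIBLE
    # `edge_feedback == True` is False for every member, so a boolean test can
    # never distinguish FLEXIBLE from FEEDBACK; an explicit member comparison can:
    assert edge_feedback != EdgeType.FEEDBACK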

conftest.py (+25)

@@ -1,5 +1,6 @@
 import contextlib
 import doctest
+import inspect
 import io
 import itertools
 import numpy as np
@@ -310,3 +311,27 @@ def pytest_configure(config):
     psyneulink._called_from_pytest = True
 
 patch_parameter_set_value_numeric_check()
+
+
+@pytest.helpers.register
+def get_all_subclasses(
+    type_=psyneulink.core.components.component.ComponentsMeta,
+    module=psyneulink,
+    include_abstract=True,
+    sort=True,
+):
+    classes = []
+
+    for item in module.__all__:
+        cls_ = getattr(module, item)
+
+        if (
+            isinstance(cls_, type_)
+            and (include_abstract or not inspect.isabstract(cls_))
+        ):
+            classes.append(cls_)
+
+    if sort:
+        classes.sort(key=lambda x: x.__name__)
+
+    return classes
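
Because the helper is registered through `pytest.helpers.register` (the pytest-helpers-namespace plugin), tests can call it without importing conftest. A minimal sketch of how a test might use it (the test itself is illustrative, not part of this commit):

    import pytest

    def test_all_components_are_documented():
        # Iterate over every concrete Component class exported by psyneulink
        for cls in pytest.helpers.get_all_subclasses(include_abstract=False):
            assert cls.__doc__, f"{cls.__name__} is missing a docstring"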

cuda_requirements.txt (+1 -1)

@@ -1 +1 @@
-pycuda >2018, <2025
+pycuda >2018, <2026

dev_requirements.txt (+1 -1)

@@ -1,6 +1,6 @@
 jupyter<1.1.2
 packaging<25.0
-pytest<8.3.5
+pytest<8.3.6
 pytest-benchmark<5.1.1
 pytest-cov<6.0.1
 pytest-forked<1.7.0

docs/source/Composition.rst (+1 -1)

@@ -31,6 +31,6 @@ Composition
    Report
 
 .. automodule:: psyneulink.core.compositions.composition
-   :members: Composition, NodeRole, Graph
+   :members: Composition, NodeRole
    :private-members:
    :exclude-members: Parameters, show_structure, CompositionError, get_inputs_format, external_input_ports_of_all_input_nodes, external_input_ports

docs/source/Core.rst (+1 -1)

@@ -96,4 +96,4 @@ Core
 - `Report`
 - `Log`
 - `mdf`
-- `Utilities`
+- :py:module:`Graph`

docs/source/Graph.rst (new file, +5)

@@ -0,0 +1,5 @@
+Graph
+=====
+
+.. automodule:: psyneulink.core.globals.graph
+   :members:

docs/source/Services.rst (+1 -1)

@@ -10,4 +10,4 @@ Services
    Preferences
    json
    Compilation
-   Utilities
+   Graph

psyneulink/core/components/functions/function.py (+27 -10)

@@ -171,9 +171,22 @@
 from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel
 from psyneulink.core.globals.registry import register_category
 from psyneulink.core.globals.utilities import (
-    convert_all_elements_to_np_array, convert_to_np_array, get_global_seed, is_instance_or_subclass, object_has_single_value, parameter_spec, parse_valid_identifier, safe_len,
-    SeededRandomState, try_extract_0d_array_item, contains_type, is_numeric, NumericCollections,
-    random_matrix, array_from_matrix_string
+    NumericCollections,
+    SeededRandomState,
+    _get_global_seed,
+    array_from_matrix_string,
+    contains_type,
+    convert_all_elements_to_np_array,
+    convert_to_np_array,
+    is_instance_or_subclass,
+    is_numeric,
+    is_numeric_scalar,
+    object_has_single_value,
+    parameter_spec,
+    parse_valid_identifier,
+    random_matrix,
+    safe_len,
+    try_extract_0d_array_item,
 )
 
 __all__ = [
@@ -357,7 +370,7 @@ def _seed_setter(value, owning_component, context, *, compilation_sync):
 
     value = try_extract_0d_array_item(value)
     if value is None or value == DEFAULT_SEED():
-        value = get_global_seed()
+        value = _get_global_seed()
 
     # Remove any old PRNG state
     owning_component.parameters.random_state.set(None, context=context)
@@ -1000,8 +1013,8 @@ def _get_pytorch_fct_param_value(self, param_name, device, context):
 
 
 # ***************************************** EXAMPLE FUNCTION *******************************************************
-PROPENSITY = "PROPENSITY"
-PERTINACITY = "PERTINACITY"
+PROPENSITY = "propensity"
+PERTINACITY = "pertinacity"
 
 
 class ArgumentTherapy(Function_Base):
@@ -1083,6 +1096,10 @@ class ArgumentTherapy(Function_Base):
         REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
     }
 
+    class Parameters(Function_Base.Parameters):
+        propensity = None
+        pertinacity = None
+
     # Mode indicators
     class Manner(Enum):
         OBSEQUIOUS = 0
@@ -1095,16 +1112,16 @@ class Manner(Enum):
     @check_user_specified
     def __init__(self,
                  default_variable=None,
-                 propensity=10.0,
-                 pertincacity=Manner.CONTRARIAN,
+                 propensity=Manner.CONTRARIAN,
+                 pertinacity=10.0,
                  params=None,
                  owner=None,
                  prefs: Optional[ValidPrefSet] = None):
 
         super().__init__(
             default_variable=default_variable,
             propensity=propensity,
-            pertinacity=pertincacity,
+            pertinacity=pertinacity,
             params=params,
             owner=owner,
             prefs=prefs,
@@ -1155,7 +1172,7 @@ def _validate_params(self, request_set, target_set=None, context=None):
 
         # Validate param
         if param_name == PERTINACITY:
-            if isinstance(param_value, numbers.Number) and 0 <= param_value <= 10:
+            if is_numeric_scalar(param_value) and 0 <= param_value <= 10:
                 # target_set[PERTINACITY] = param_value
                 pass  # This leaves param in request_set, clear to be assigned to target_set in call to super below
             else:

psyneulink/core/components/functions/nonstateful/learningfunctions.py (+11 -2)

@@ -506,6 +506,15 @@ def func(entry_to_store,
              decay_rate,
              random_state)->torch.tensor:
         """Decay existing memories and replace weakest entry with entry_to_store (parallel EMStorage._function)"""
+
+        # Batched input is only supported for batch size 1; raise otherwise.
+        if len(entry_to_store.shape) > 2:
+            if entry_to_store.shape[0] != 1:
+                raise NotImplementedError("EMStorage has not been implemented for batch sizes greater than 1")
+            else:
+                # Drop the singleton batch dimension
+                entry_to_store = entry_to_store[0]
+
         if random_state.uniform(0, 1) < storage_prob:
             if decay_rate:
                 memory_matrix *= torch.tensor(decay_rate)
@@ -515,9 +524,9 @@
         # Find weakest entry (i.e., with lowest norm) along specified axis of matrix
         idx_of_min = torch.argmin(torch.linalg.norm(memory_matrix, axis=axis))
         if axis == 0:
-            memory_matrix[:,idx_of_min] = entry_to_store
+            memory_matrix[:,idx_of_min] = entry_to_store[0]
         elif axis == 1:
-            memory_matrix[idx_of_min,:] = entry_to_store
+            memory_matrix[idx_of_min,:] = entry_to_store[0]
         return memory_matrix
     return func
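
The new guard accepts a 3-D `entry_to_store` only when its leading batch dimension is a singleton, squeezing that dimension away before storage. The same shape logic in isolation (a standalone sketch; names are illustrative):

    import torch

    def squeeze_singleton_batch(entry: torch.Tensor) -> torch.Tensor:
        # 2-D entries pass through unchanged; 3-D entries must have batch size 1
        if entry.dim() > 2:
            if entry.shape[0] != 1:
                raise NotImplementedError("batch sizes greater than 1 are not supported")
            entry = entry[0]  # drop the singleton batch dimension
        return entry

    assert squeeze_singleton_batch(torch.zeros(1, 4, 8)).shape == (4, 8)
    assert squeeze_singleton_batch(torch.zeros(4, 8)).shape == (4, 8)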

psyneulink/core/components/functions/nonstateful/optimizationfunctions.py (+2 -2)

@@ -641,7 +641,7 @@ def _evaluate(self, variable=None, context=None, params=None, fit_evaluate=False
         # Run compiled mode if requested by parameter and everything is initialized
         if self.owner and self.owner.parameters.comp_execution_mode._get(context) != 'Python' and \
            ContextFlags.PROCESSING in context.flags:
-            all_samples = [s for s in itertools.product(*self.search_space)]
+            all_samples = list(itertools.product(*self.search_space))
             all_values, num_evals = self._grid_evaluate(self.owner, context, fit_evaluate)
             assert len(all_values) == num_evals
             assert len(all_samples) == num_evals
@@ -846,7 +846,7 @@ def reset_grid(self, context):
         """Reset iterators in `search_space <GridSearch.search_space>`"""
         for s in self.search_space:
             s.reset()
-        self.parameters.grid._set(itertools.product(*[s for s in self.search_space]), context)
+        self.parameters.grid._set((s for s in itertools.product(*[s for s in self.search_space])), context)
 
     def _traverse_grid(self, variable, sample_num, context=None):
         """Get next sample from grid.

psyneulink/core/components/functions/nonstateful/transferfunctions.py (+53 -23)

@@ -2828,9 +2828,10 @@ class SoftMax(TransferFunction):
     <SoftMax.gain>` parametrically based on the `variable <SoftMax.variable>`:
 
   - *mask_threshold* -- setting the **mask_threshold** argument to a scalar value causes the `variable
-    <SoftMax.variable>` to be thresholded by that value before applying the SoftMax function; any elements of
-    `variable <SoftMax.variable>` with an absolute value below the threshold are set to 0; all others are scaled
-    by the specified `gain <SoftMax.gain>` and then passed through the SoftMax function. This only applies if the
+    <SoftMax.variable>` to be thresholded by that value before applying the SoftMax function; each element in
+    `variable <SoftMax.variable>` is first scaled by `gain <SoftMax.gain>`. Then, any elements with an absolute
+    value below *mask_threshold* are set to negative infinity (``-inf``), effectively masking them since
+    ``exp(-inf) = 0``. The remaining values are then passed through the SoftMax function. This only applies if the
     **gain** argument is specified as a scalar; if it is specified as *ADAPTIVE*, then the **mask_threshold**
     argument is ignored.
@@ -2920,10 +2921,11 @@ class SoftMax(TransferFunction):
 
     mask_threshold : scalar or None
         determines whether the `variable <SoftMax.variable>` is thresholded before applying the SoftMax function;
-        if it is a scalar, only elements of `variable <SoftMax.variable>` with an absolute value greater than that
-        value are considered when applying the SoftMax function (which are then scaled by the `gain <SoftMax.gain>`
-        parameter; all other elements are assigned 0. This only applies if `gain <SoftMax.gain>` is specified as a
-        scalar; otherwise it is ignored (see `Thresholding and Adaptive Gain <SoftMax_AdaptGain>` for details).
+        if it is a scalar, each element of `variable <SoftMax.variable>` is first scaled by `gain <SoftMax.gain>`.
+        Then, only elements with an absolute value greater than *mask_threshold* are considered when applying the
+        SoftMax function, while all other elements are set to ``-inf``, effectively masking them since
+        ``exp(-inf) = 0``. This only applies if `gain <SoftMax.gain>` is specified as a scalar; otherwise it is
+        ignored (see `Thresholding and Adaptive Gain <SoftMax_AdaptGain>` for details).
 
     adapt_scale : scalar
         determines the *scale* parameter using by the `adapt_gain <SoftMax.adapt_gain>` method (see method for details).
@@ -3149,22 +3151,31 @@ def _validate_variable(self, variable, context=None):
         return np.asarray(variable)
 
     def apply_softmax(self, input_value, gain, mask_threshold, output_type):
-
         # Modulate input_value by gain
         v = gain * input_value
+
+        # Mask threshold
+        if mask_threshold is not None:
+            if np.any(v < 0):
+                warnings.warn(f"SoftMax function: mask_threshold is set "
+                              f"to {mask_threshold} but input_value contains negative values. "
+                              f"Masking will be applied to the magnitude of the input.")
+
+            v = np.where(np.abs(v) > mask_threshold, v, -np.inf)
+
+        # Make numerically stable by shifting by max value
+        if np.any(v != -np.inf):
+            v = v - np.max(v)
-        # Shift by max to avoid extreme values:
-        v = v - np.max(v)
+
         # Exponentiate
         v = np.exp(v)
-        # Threshold if specified:
-        if mask_threshold:
-            v = v * np.where(input_value > mask_threshold, v, 0)
+
         # Normalize (to sum to 1)
-        if not any(v):
+        if not np.any(v):
             # If v is all zeros, avoid divide by zero in normalize and return all zeros for softmax
             sm = v
         else:
-            sm = v / np.sum(v, axis=0)
+            sm = v / np.sum(v)
 
         # Generate one-hot encoding based on selected output_type
         if output_type in {ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR}:
@@ -3472,15 +3483,34 @@ def _gen_pytorch_fct(self, device, context=None):
         if isinstance(gain, str) and gain == ADAPTIVE:
             return lambda x: (torch.softmax(self._gen_pytorch_adapt_gain_fct(device, context)(x) * x, -1))
 
-        elif mask_threshold:
+        elif mask_threshold is not None:
             def pytorch_thresholded_softmax(_input: torch.Tensor) -> torch.Tensor:
-                # Mask elements of input below threshold
-                _mask = (torch.abs(_input) > mask_threshold)
-                # Subtract off the max value in the input to eliminate extreme values, exponentiate, and apply mask
-                masked_exp = _mask * torch.exp(gain * (_input - torch.max(_input, -1, keepdim=True)[0]))
-                if (masked_exp == 0).all():
-                    return masked_exp
-                return masked_exp / torch.sum(masked_exp, -1, keepdim=True)
+                v = gain * _input
+
+                # Apply threshold-based masking
+                if mask_threshold is not None:
+                    if torch.any(_input < 0):
+                        warnings.warn(f"Softmax function: mask_threshold is set to {mask_threshold}, "
+                                      f"but input contains negative values. "
+                                      f"Masking will be applied to the magnitude of the input.")
+
+                    # Create a mask where values below threshold are set to -inf
+                    mask = torch.abs(v) > mask_threshold
+                    v = v.masked_fill(~mask, float('-inf'))  # More stable than torch.where()
+
+                    # Handle case where all values are masked (return tensor with gradient support)
+                    if torch.all(~mask):
+                        return torch.full_like(v, 0.0, requires_grad=True)
+
+                # Make numerically stable by shifting max value
+                max_v = torch.max(v[mask])  # Avoid computing max over -inf
+                v = v - max_v
+
+                # Compute softmax (PyTorch handles -inf correctly)
+                exp_v = torch.exp(v)
+                sm = exp_v / torch.sum(exp_v, dim=-1, keepdim=True)
+
+                return sm
             # Return the function
             return pytorch_thresholded_softmax
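
The NumPy and PyTorch paths now implement the same recipe: scale by `gain`, mask sub-threshold magnitudes to `-inf`, shift by the max for numerical stability, exponentiate, and normalize. A condensed standalone sketch of that recipe (values chosen for illustration only):

    import numpy as np

    def masked_softmax(x, gain=1.0, mask_threshold=None):
        v = gain * np.asarray(x, dtype=float)
        if mask_threshold is not None:
            # Sub-threshold magnitudes become -inf, so exp() sends them to 0
            v = np.where(np.abs(v) > mask_threshold, v, -np.inf)
        if np.any(v != -np.inf):
            v = v - np.max(v)  # shift by max for numerical stability
        e = np.exp(v)
        return e if not np.any(e) else e / np.sum(e)

    out = masked_softmax([0.1, 2.0, 3.0], gain=1.0, mask_threshold=1.0)
    assert out[0] == 0.0 and np.isclose(out.sum(), 1.0)  # 0.1 was masked out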

psyneulink/core/components/functions/nonstateful/transformfunctions.py (+5 -4)

@@ -1593,16 +1593,17 @@ def _gen_pytorch_fct(self, device, context=None):
         weights = self._get_pytorch_fct_param_value('weights', device, context)
         if weights is not None:
             weights = torch.tensor(weights, device=device).double()
+        # Note: the first dimension of x is batch, aggregate over the second dimension
         if self.operation == SUM:
             if weights is not None:
-                return lambda x: torch.sum(x * weights, 0)
+                return lambda x: torch.sum(x * weights, 1)
             else:
-                return lambda x: torch.sum(x, 0)
+                return lambda x: torch.sum(x, 1)
         elif self.operation == PRODUCT:
             if weights is not None:
-                return lambda x: torch.prod(x * weights, 0)
+                return lambda x: torch.prod(x * weights, 1)
             else:
-                return lambda x: torch.prod(x, 0)
+                return lambda x: torch.prod(x, 1)
         else:
             from psyneulink.library.compositions.autodiffcomposition import AutodiffCompositionError
             raise AutodiffCompositionError(f"The 'operation' parameter of {function.componentName} is not supported "
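
With a leading batch dimension, reducing over dim 0 would combine corresponding elements across batch entries, while dim 1 combines the stacked inputs within each batch entry, which is what the operation intends. A quick illustration:

    import torch

    # Batch of 2 samples, each combining 3 input vectors of length 4
    x = torch.ones(2, 3, 4)

    assert torch.sum(x, 0).shape == (3, 4)  # collapses the batch: wrong axis
    assert torch.sum(x, 1).shape == (2, 4)  # combines the 3 inputs per sample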

psyneulink/core/components/functions/stateful/integratorfunctions.py (+2 -2)

@@ -2070,15 +2070,15 @@ def _validate_params(self, request_set, target_set=None, context=None):
 
         if RATE in request_set and request_set[RATE] is not None:
             rate = request_set[RATE]
-            if np.isscalar(rate):
+            if is_numeric_scalar(rate):
                 rate = [rate]
             if not all_within_range(rate, 0, 1):
                 raise FunctionError("Value(s) specified for {} argument of {} ({}) must be in interval [0,1]".
                                     format(repr(RATE), self.__class__.__name__, rate))
 
         if DECAY in request_set and request_set[DECAY] is not None:
             decay = request_set[DECAY]
-            if np.isscalar(decay):
+            if is_numeric_scalar(decay):
                 decay = [decay]
             if not all(0.0 <= d <= 1.0 for d in decay):
                 raise FunctionError("Value(s) specified for {} argument of {} ({}) must be in interval [0,1]".
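
The switch away from `np.isscalar` matters because `np.isscalar` returns False for 0-d NumPy arrays, which PsyNeuLink parameter values often are; `is_numeric_scalar` (imported from `psyneulink.core.globals.utilities`, as in function.py above) evidently covers those as well. The NumPy behavior is easy to confirm, and a check in the same spirit is sketched below (illustrative, not PsyNeuLink's implementation):

    import numpy as np

    assert np.isscalar(0.5)                  # plain Python float: True
    assert not np.isscalar(np.array(0.5))    # 0-d array: False
    assert np.isscalar(np.float64(0.5))      # NumPy scalar type: True

    def numeric_scalar(x):
        # Accept Python/NumPy scalars as well as 0-d numeric arrays
        return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)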

psyneulink/core/components/functions/stateful/memoryfunctions.py (+1 -1)

@@ -393,7 +393,7 @@ def _distance_field_weights_setter(value, owning_component=None, context=None):
     # NOTE: need the following to accommodate various forms of specification (single value, None's, etc)
     # that are resolved elsewhere
     # FIX: STANDARDIZE FORMAT FOR FIELDWEIGHTS HERE (AS LIST OF INTS) AND GET RID OF THE FOLLOWING
-    test_val = np.array([int(np.array(val).item()) if val else 0 for val in value])
+    test_val = np.array([int(np.array(val).item()) if (arr := np.array(val)).size and arr.item() else 0 for val in value])
     test_val = np.full(len(variable), test_val) if len(test_val) == 1 else test_val
     test_curr_field_weights = np.array([int(np.array(val).item()) if val else 0 for val in current_field_weights])
     test_curr_field_weights = (np.full(len(variable), test_curr_field_weights) if len(variable) == 1
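
The rewritten comprehension uses a walrus assignment so each field weight is converted to an array exactly once, then truth-tested via `.size` and `.item()` rather than `if val`; that sidesteps the ambiguous-truth-value error raised when `val` is a multi-element array and treats empty arrays as 0. A condensed illustration with made-up inputs:

    import numpy as np

    weights = [None, 0, np.array(2.0), np.array([], dtype=float)]

    # `.size` filters out empty arrays; `.item()` then tests the scalar value
    coded = [int(arr.item()) if (arr := np.array(val)).size and arr.item() else 0
             for val in weights]
    assert coded == [0, 0, 2, 0]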
