
Merge pull request #1744 from PrincetonUniversity/devel
Devel
dillontsmith committed Aug 31, 2020
2 parents 26e06a4 + 9f82056 commit 86a09ca
Showing 30 changed files with 490 additions and 712 deletions.
8 changes: 0 additions & 8 deletions psyneulink/core/components/functions/transferfunctions.py
@@ -3749,12 +3749,10 @@ class Parameters(TransferFunction.Parameters):
_validate_intensity_cost_fct = get_validator_by_function(is_function_type)
intensity_cost_fct_mult_param = Parameter(modulable=True,
modulation_combination_function=PRODUCT,
aliases=INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM,
getter=_intensity_cost_fct_mult_param_getter,
setter=_intensity_cost_fct_mult_param_setter)
intensity_cost_fct_add_param = Parameter(modulable=True,
modulation_combination_function=SUM,
aliases=INTENSITY_COST_FCT_ADDITIVE_PARAM,
getter=_intensity_cost_fct_add_param_getter,
setter=_intensity_cost_fct_add_param_setter)

@@ -3763,12 +3761,10 @@ class Parameters(TransferFunction.Parameters):
_validate_adjustment_cost_fct = get_validator_by_function(is_function_type)
adjustment_cost_fct_mult_param = Parameter(modulable=True,
modulation_combination_function=PRODUCT,
aliases=ADJUSTMENT_COST_FCT_MULTIPLICATIVE_PARAM,
getter=_adjustment_cost_fct_mult_param_getter,
setter=_adjustment_cost_fct_mult_param_setter)
adjustment_cost_fct_add_param = Parameter(modulable=True,
modulation_combination_function=SUM,
aliases=ADJUSTMENT_COST_FCT_ADDITIVE_PARAM,
getter=_adjustment_cost_fct_add_param_getter,
setter=_adjustment_cost_fct_add_param_setter)

@@ -3777,12 +3773,10 @@ class Parameters(TransferFunction.Parameters):
_validate_duration_cost_fct = get_validator_by_function(is_function_type)
duration_cost_fct_mult_param = Parameter(modulable=True,
modulation_combination_function=PRODUCT,
aliases=DURATION_COST_FCT_MULTIPLICATIVE_PARAM,
getter=_duration_cost_fct_mult_param_getter,
setter=_duration_cost_fct_mult_param_setter)
duration_cost_fct_add_param = Parameter(modulable=True,
modulation_combination_function=SUM,
aliases=DURATION_COST_FCT_ADDITIVE_PARAM,
getter=_duration_cost_fct_add_param_getter,
setter=_duration_cost_fct_add_param_setter)

@@ -3791,12 +3785,10 @@ class Parameters(TransferFunction.Parameters):
_validate_combine_costs_fct = get_validator_by_function(is_function_type)
combine_costs_fct_mult_param=Parameter(modulable=True,
modulation_combination_function=PRODUCT,
aliases=COMBINE_COSTS_FCT_MULTIPLICATIVE_PARAM,
getter=_combine_costs_fct_mult_param_getter,
setter=_combine_costs_fct_mult_param_setter)
combine_costs_fct_add_param=Parameter(modulable=True,
modulation_combination_function=SUM,
aliases=COMBINE_COSTS_FCT_ADDITIVE_PARAM,
getter=_combine_costs_fct_add_param_getter,
setter=_combine_costs_fct_add_param_setter)

131 changes: 38 additions & 93 deletions psyneulink/core/compositions/composition.py
@@ -2387,12 +2387,12 @@ def input_function(env, result):
OBJECTIVE_MECHANISM, ONLINE, OUTCOME, OUTPUT, OUTPUT_CIM_NAME, OUTPUT_MECHANISM, OUTPUT_PORTS, OWNER_VALUE, \
PARAMETER, PARAMETER_CIM_NAME, PROCESSING_PATHWAY, PROJECTION, PROJECTIONS, PULSE_CLAMP, \
ROLES, SAMPLE, SHADOW_INPUTS, SIMULATIONS, SOFT_CLAMP, SSE, \
TARGET, TARGET_MECHANISM, VALUES, VARIABLE, WEIGHT
TARGET, TARGET_MECHANISM, VALUES, VARIABLE, WEIGHT, OWNER_MECH
from psyneulink.core.globals.log import CompositionLog, LogCondition
from psyneulink.core.globals.parameters import Parameter, ParametersBase
from psyneulink.core.globals.registry import register_category
from psyneulink.core.globals.utilities import \
ContentAddressableList, call_with_pruned_args, convert_to_list, convert_to_np_array, merge_dictionaries
ContentAddressableList, call_with_pruned_args, convert_to_list, convert_to_np_array
from psyneulink.core.scheduling.condition import All, Always, Condition, EveryNCalls, Never
from psyneulink.core.scheduling.scheduler import Scheduler
from psyneulink.core.scheduling.time import Time, TimeScale
@@ -2678,100 +2678,54 @@ def prune_feedback_edges(self):
structural_dependencies = self.dependency_dict
# wipe and reconstruct list of vertices in cycles
self.cycle_vertices = set()
flexible_edges = set()

# prune all feedback projections
for node in execution_dependencies:
# recurrent edges
# prune recurrent edges
try:
execution_dependencies[node].remove(node)
self.cycle_vertices.add(node)
except KeyError:
pass

# standard edges labeled as feedback
vert = self.comp_to_vertex[node]
execution_dependencies[node] = {
dep for dep in execution_dependencies[node]
if (
self.comp_to_vertex[dep] not in vert.source_types
or vert.source_types[self.comp_to_vertex[dep]] is not EdgeType.FEEDBACK
)
}
for dep in tuple(execution_dependencies[node]):
vert = self.comp_to_vertex[node]
dep_vert = self.comp_to_vertex[dep]

if dep_vert in vert.source_types:
# prune standard edges labeled as feedback
if vert.source_types[dep_vert] is EdgeType.FEEDBACK:
execution_dependencies[node].remove(dep)
# store flexible edges for potential pruning later
elif vert.source_types[dep_vert] is EdgeType.FLEXIBLE:
flexible_edges.add((dep, node))

# construct a parallel networkx graph to use its cycle algorithms
nx_graph = self._generate_networkx_graph(execution_dependencies)
connected_components = list(networkx.strongly_connected_components(nx_graph))

# prune only one flexible edge per attempt, to remove as few
# edges as possible
# For now, just prune the first flexible edge each time. Maybe
# look for "best" edges to prune in future by frequency in
# cycles, if that occurs
cycles_changed = True
while cycles_changed:
cycles_changed = False

# recompute cycles after each prune
for cycle in networkx.simple_cycles(nx_graph):
len_cycle = len(cycle)

for i in range(len_cycle):
parent = self.comp_to_vertex[cycle[i]]
child = self.comp_to_vertex[cycle[(i + 1) % len_cycle]]

if (
parent in child.source_types
and child.source_types[parent] is EdgeType.FLEXIBLE
):
execution_dependencies[child.component].remove(parent.component)
child.source_types[parent] = EdgeType.FEEDBACK
nx_graph.remove_edge(parent.component, child.component)
cycles_changed = True
break

def merge_intersecting_cycles(cycle_list: list) -> dict:
# transforms a cycle represented as a list [c_0, ... c_n]
# to a dependency dictionary {c_0: c_n, c_1: c_0, ..., c_n: c_{n-1}}
cycle_dicts = [
{
cycle[i]: cycle[(i - 1) % len(cycle)]
for i in range(len(cycle))
}
for cycle in cycle_list
]

new_cycles = cycle_dicts
cycles_changed = True
for parent, child in flexible_edges:
cycles = [c for c in connected_components if len(c) > 1]

# repeatedly join cycles that have a Node in common
while cycles_changed:
cycles_changed = False
i = 0
j = 1

while i < len(new_cycles):
while j < len(new_cycles):
merged, has_shared_keys = merge_dictionaries(
new_cycles[i],
new_cycles[j]
)
if has_shared_keys:
cycles_changed = True
new_cycles[i] = merged
new_cycles.remove(new_cycles[j])
else:
j += 1
i += 1

return new_cycles
if len(cycles) == 0:
break

cycles = list(networkx.simple_cycles(nx_graph))
# create the longest possible cycles using any smaller, connected cycles
cycles = merge_intersecting_cycles(cycles)
if any((parent in c and child in c) for c in cycles):
# prune
execution_dependencies[child].remove(parent)
self.comp_to_vertex[child].source_types[self.comp_to_vertex[parent]] = EdgeType.FEEDBACK
nx_graph.remove_edge(parent, child)
# recompute cycles after each prune
connected_components = list(networkx.strongly_connected_components(nx_graph))

# find all the parent nodes for each node in a cycle, excluding
# parents that are part of the cycle
for cycle in cycles:
len_cycle = len(cycle)
for cycle in [c for c in connected_components if len(c) > 1]:
acyclic_dependencies = set()

for node in cycle:
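The reworked pruning above swaps simple-cycle enumeration for strongly connected components and removes at most one flexible edge per pass. Below is a minimal, self-contained sketch of that strategy using a plain dependency dict rather than PsyNeuLink's graph classes; the names and data are illustrative, not the library's own code.

import networkx

def build_graph(dependencies):
    # dependencies: {node: set of nodes it depends on}; edges run dependency -> dependent
    g = networkx.DiGraph()
    g.add_nodes_from(dependencies)
    for node, deps in dependencies.items():
        g.add_edges_from((dep, node) for dep in deps)
    return g

dependencies = {'A': set(), 'B': {'A', 'C'}, 'C': {'B'}}   # B <-> C forms a cycle
flexible_edges = {('C', 'B')}                              # (parent, child) pairs

g = build_graph(dependencies)
for parent, child in flexible_edges:
    # recompute the cyclic components after each prune
    cycles = [c for c in networkx.strongly_connected_components(g) if len(c) > 1]
    if not cycles:
        break
    if any(parent in c and child in c for c in cycles):
        dependencies[child].remove(parent)   # prune: treat this edge as feedback
        g.remove_edge(parent, child)

print(dependencies)   # {'A': set(), 'B': {'A'}, 'C': {'B'}} -- acyclic now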
@@ -2803,11 +2757,14 @@ def merge_intersecting_cycles(cycle_list: list) -> dict:
structural_dependencies
)

def get_cycles(self, nx_graph: typing.Optional[networkx.DiGraph] = None):
def get_strongly_connected_components(
self,
nx_graph: typing.Optional[networkx.DiGraph] = None
):
if nx_graph is None:
nx_graph = self._generate_networkx_graph()

return list(networkx.simple_cycles(nx_graph))
return list(networkx.strongly_connected_components(nx_graph))

def _generate_networkx_graph(self, dependency_dict=None):
if dependency_dict is None:
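The rename from get_cycles to get_strongly_connected_components also changes what is returned: instead of enumerating every simple cycle, the graph is partitioned into groups of mutually reachable nodes. A small illustration of the difference, assuming the same kind of networkx.DiGraph that _generate_networkx_graph builds from a dependency dict:

import networkx

g = networkx.DiGraph([('A', 'B'), ('B', 'C'), ('C', 'A'), ('B', 'A')])

print(list(networkx.simple_cycles(g)))
# e.g. [['A', 'B'], ['A', 'B', 'C']] -- one entry per distinct simple cycle
print(list(networkx.strongly_connected_components(g)))
# [{'A', 'B', 'C'}] -- one set per group of mutually reachable nodes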
@@ -3496,32 +3453,19 @@ def _update_processing_graph(self):
Constructs the processing graph (the graph that contains only Nodes as vertices)
from the composition's full graph
"""
logger.debug('Updating processing graph')

self._graph_processing = self.graph.copy()

def remove_vertex(vertex):
logger.debug('Removing', vertex)
for parent in vertex.parents:
for child in vertex.children:
child.source_types[parent] = vertex.feedback
self._graph_processing.connect_vertices(parent, child)

for node in cur_vertex.parents + cur_vertex.children:
logger.debug(
'New parents for vertex {0}: \n\t{1}\nchildren: \n\t{2}'.format(
node, node.parents, node.children
)
)

logger.debug('Removing vertex {0}'.format(cur_vertex))

self._graph_processing.remove_vertex(vertex)

# copy to avoid iteration problems when deleting
vert_list = self._graph_processing.vertices.copy()
for cur_vertex in vert_list:
logger.debug('Examining', cur_vertex)
if not cur_vertex.component.is_processing:
remove_vertex(cur_vertex)
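The simplified _update_processing_graph above drops the debug logging but keeps the core contraction step: when a non-processing vertex (e.g. one holding a Projection) is removed, each of its parents is wired directly to each of its children. A hedged sketch of that step using plain parent/child sets instead of PsyNeuLink's Graph vertices:

def contract_vertex(parents, children, vertex):
    # connect every parent of `vertex` to every child of `vertex`
    for p in parents[vertex]:
        for c in children[vertex]:
            children[p].add(c)
            parents[c].add(p)
    # then drop the vertex itself from the graph
    for p in parents[vertex]:
        children[p].discard(vertex)
    for c in children[vertex]:
        parents[c].discard(vertex)
    del parents[vertex], children[vertex]

parents = {'in': set(), 'proj': {'in'}, 'out': {'proj'}}
children = {'in': {'proj'}, 'proj': {'out'}, 'out': set()}
contract_vertex(parents, children, 'proj')   # e.g. a Projection, not a processing Node
print(parents['out'], children['in'])        # {'in'} {'out'}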

@@ -5490,7 +5434,7 @@ def _check_for_unnecessary_feedback_projections(self):
labeled as EdgeType.FEEDBACK (True) but are not in a cycle
"""
unnecessary_feedback_specs = []
cycles = self.graph.get_cycles()
cycles = self.graph.get_strongly_connected_components()

for proj in self.projections:
try:
@@ -7282,14 +7226,14 @@ def _get_invalid_aux_components(self, controller):
if not component:
continue
if isinstance(component, Projection):
if hasattr(component.sender,'owner_mech'):
if hasattr(component.sender, OWNER_MECH):
sender_node = component.sender.owner_mech
else:
if isinstance(component.sender.owner, CompositionInterfaceMechanism):
sender_node = component.sender.owner.composition
else:
sender_node = component.sender.owner
if hasattr(component.receiver, 'owner_mech'):
if hasattr(component.receiver, OWNER_MECH):
receiver_node = component.receiver.owner_mech
else:
if isinstance(component.receiver.owner, CompositionInterfaceMechanism):
@@ -7362,7 +7306,7 @@ def _check_projection_initialization_status(self, context=None):
invalid_aux_components = self._get_invalid_aux_components(self.controller)
for component in invalid_aux_components:
if isinstance(component, Projection):
if hasattr(component.receiver, 'owner_mech'):
if hasattr(component.receiver, OWNER_MECH):
owner = component.receiver.owner_mech
else:
owner = component.receiver.owner
@@ -8753,6 +8697,7 @@ def execute(

# ASSIGNMENTS **************************************************************************************************

assert not str(bin_execute).endswith("Run")
if bin_execute == 'Python':
bin_execute = False
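A hedged reading of the two lines added above, expressed as a standalone helper (the helper name and the 'LLVM' value are illustrative, not PsyNeuLink API): '...Run'-suffixed compiled modes are presumably expected to be resolved before a single execute() call, and the string 'Python' is mapped back to the plain interpreted path.

def _normalize_bin_execute(bin_execute):   # hypothetical helper mirroring the added lines
    assert not str(bin_execute).endswith("Run")
    if bin_execute == 'Python':
        return False
    return bin_execute

print(_normalize_bin_execute('Python'))   # False
print(_normalize_bin_execute('LLVM'))     # 'LLVM' passes through unchanged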

1 change: 1 addition & 0 deletions psyneulink/core/globals/keywords.py
@@ -722,6 +722,7 @@ def _is_metric(metric):
port_value = "Port value" # Used in Port specification dict to specify Port value
port_params = "Port params" # Used in Port specification dict to specify Port params

OWNER_MECH = 'owner_mech'
#endregion

#region ---------------------------------------- MODULATORY MECHANISMS ----------------------------------------------
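The new OWNER_MECH constant is what the composition.py hunks above switch to: the same attribute-probing logic, but with a shared keyword instead of a hard-coded 'owner_mech' string at each hasattr() call site. A small self-contained sketch of the pattern (PortLike and resolve_node are hypothetical stand-ins, not PsyNeuLink classes):

OWNER_MECH = 'owner_mech'   # mirrors the constant added to keywords.py

class PortLike:             # hypothetical stand-in for a port on an interface mechanism
    def __init__(self, owner_mech=None, owner=None):
        if owner_mech is not None:
            self.owner_mech = owner_mech
        self.owner = owner

def resolve_node(port):
    # prefer the wrapped mechanism when the port exposes one, otherwise fall back to owner
    if hasattr(port, OWNER_MECH):
        return port.owner_mech
    return port.owner

print(resolve_node(PortLike(owner_mech='mech-A')))   # mech-A
print(resolve_node(PortLike(owner='comp-B')))        # comp-B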
40 changes: 24 additions & 16 deletions tests/composition/test_autodiffcomposition.py
@@ -291,14 +291,16 @@ def test_pytorch_loss_spec(self, mode):

@pytest.mark.benchmark(group="Optimizer specs")
@pytest.mark.parametrize(
'learning_rate, weight_decay, optimizer_type', [
(10, 0, 'sgd'), (1.5, 1, 'sgd'), (1.5, 1, 'adam'),
'learning_rate, weight_decay, optimizer_type, expected', [
(10, 0, 'sgd', [[[0.9863038667851067]], [[0.9944287263151904]], [[0.9934801466163382]], [[0.9979153035411085]]]),
(1.5, 1, 'sgd', None),
(1.5, 1, 'adam', None),
]
)
@pytest.mark.parametrize("mode", ['Python',
pytest.param('LLVMRun', marks=pytest.mark.llvm),
])
def test_optimizer_specs(self, learning_rate, weight_decay, optimizer_type, mode, benchmark):
def test_optimizer_specs(self, learning_rate, weight_decay, optimizer_type, expected, mode, benchmark):
xor_in = TransferMechanism(name='xor_in',
default_variable=np.zeros(2))
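The parametrization above now carries an optional reference result per case; None skips the accuracy check while still exercising the run. A stripped-down sketch of the same pattern (the numbers and test body are illustrative, not the real reference values):

import numpy as np
import pytest

@pytest.mark.parametrize(
    'learning_rate, expected', [
        (10, [[0.99]]),     # reference value known for this case
        (1.5, None),        # no reference value: only check that the run completes
    ]
)
def test_sketch(learning_rate, expected):
    results = [[0.99]]      # stand-in for xor.learn(...)
    if expected is not None:
        assert np.allclose(results, expected)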

@@ -335,12 +337,16 @@ def test_optimizer_specs(self, learning_rate, weight_decay, optimizer_type, mode
# targets={xor_out:xor_targets},
# epochs=10)
results_before_proc = xor.learn(inputs={"inputs": {xor_in:xor_inputs},
"targets": {xor_out:xor_targets},
"epochs": 10}, bin_execute=mode)
"targets": {xor_out:xor_targets},
"epochs": 10}, bin_execute=mode)

if expected is not None:
assert np.allclose(results_before_proc, expected)

benchmark(xor.learn, inputs={"inputs": {xor_in:xor_inputs},
"targets": {xor_out:xor_targets},
"epochs": 10}, bin_execute=mode)
if benchmark.enabled:
benchmark(xor.learn, inputs={"inputs": {xor_in:xor_inputs},
"targets": {xor_out:xor_targets},
"epochs": 10}, bin_execute=mode)


# test whether pytorch parameters and projections are kept separate (at diff. places in memory)
@@ -469,9 +475,10 @@ def test_xor_training_correctness(self, eps, calls, opt, mode, benchmark, expect
for r, t in zip(results, expected):
assert np.allclose(r[0], t)

benchmark(xor.learn, inputs={"inputs": {xor_in: xor_inputs},
"targets": {xor_out: xor_targets},
"epochs": eps}, bin_execute=mode)
if benchmark.enabled:
benchmark(xor.learn, inputs={"inputs": {xor_in: xor_inputs},
"targets": {xor_out: xor_targets},
"epochs": eps}, bin_execute=mode)


# tests whether semantic network created as autodiff composition learns properly
@@ -775,13 +782,14 @@ def test_semantic_net_training_correctness(self, eps, opt, mode, benchmark):
for res, exp in zip(results, expected):
for r, e in zip(res, exp):
assert np.allclose(r, e)
benchmark(sem_net.learn, inputs={'inputs': inputs_dict,
'targets': targets_dict,
'epochs': eps}, bin_execute=mode)
if benchmark.enabled:
benchmark(sem_net.learn, inputs={'inputs': inputs_dict,
'targets': targets_dict,
'epochs': eps}, bin_execute=mode)

@pytest.mark.parametrize("mode", ['Python',
pytest.param('LLVMRun', marks=pytest.mark.llvm),
])
pytest.param('LLVMRun', marks=pytest.mark.llvm),
])
def test_pytorch_equivalence_with_autodiff_composition(self, mode):
iSs = np.array(
[np.array([0.47360805, 0.8009108, 0.5204775, 0.53737324, 0.7586156,