
Commit c5f0b3a

Compatible with MindSpore 1.6.1 and multi-backend computation graph

1 parent f9979c7 commit c5f0b3a

9 files changed (+292, -170 lines)

requirements/requirements_ms.txt

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-mindspore==1.5.2
+mindspore==1.6.1
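Not part of the commit, but for reference: the pinned backend version can be picked up with standard pip usage, `pip install -r requirements/requirements_ms.txt`.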

tensorlayerx/backend/ops/load_backend.py

Lines changed: 1 addition & 1 deletion

@@ -59,7 +59,7 @@
 import os
 os.environ['DEVICE_ID'] = '0'
 context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU'),
-# context.set_context(mode=context.GRAPH_MODE, device_target='CPU'),
+# context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU'),
 # enable_task_sink=True, enable_loop_sink=True)
 # context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
 sys.stderr.write('Using MindSpore backend.\n')
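For readers adapting this file: the backend still hard-codes 'GPU', with the CPU and Ascend variants left commented out. A minimal sketch of making the target configurable; `TLX_MS_DEVICE` is a hypothetical environment variable invented for this example, not something the commit or TensorLayerX defines, while `context.set_context` and `PYNATIVE_MODE` are the same MindSpore APIs used above.

    # Hedged sketch: choose the MindSpore device from the environment instead
    # of hard-coding 'GPU'. TLX_MS_DEVICE is a hypothetical variable name.
    import os
    from mindspore import context

    os.environ.setdefault('DEVICE_ID', '0')
    device = os.environ.get('TLX_MS_DEVICE', 'GPU')  # 'CPU', 'GPU', or 'Ascend'
    context.set_context(mode=context.PYNATIVE_MODE, device_target=device)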

tensorlayerx/backend/ops/mindspore_nn.py

Lines changed: 2 additions & 2 deletions

@@ -19,7 +19,7 @@
 from mindspore.ops.operations import LayerNorm
 import mindspore.numpy as np
 from mindspore.common.parameter import ParameterTuple
-from mindspore.nn.layer.rnns import _DynamicRNN
+from mindspore.nn.layer.rnns import _DynamicRNNBase
 import warnings
 import math

@@ -1953,7 +1953,7 @@ def __init__(
        self.w_hh_list = ParameterTuple(w_hh)
        self.b_ih_list = ParameterTuple(b_ih)
        self.b_hh_list = ParameterTuple(b_hh)
-       self.rnn = _DynamicRNN(mode)
+       self.rnn = _DynamicRNNBase(mode)
        self.is_lstm = mode == "LSTM"

        self.zeros = P.Zeros()
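`_DynamicRNN` was renamed to `_DynamicRNNBase` between MindSpore 1.5.x and 1.6.x; both are internal, underscored APIs that can move without notice. A hedged compatibility shim, assuming the rename is the only difference, could tolerate either release at import time:

    # Hedged sketch: accept either MindSpore release, assuming only the class
    # name changed. Internal APIs like these are not guaranteed to be stable.
    try:
        from mindspore.nn.layer.rnns import _DynamicRNNBase as _DynamicRNN  # >= 1.6
    except ImportError:
        from mindspore.nn.layer.rnns import _DynamicRNN  # 1.5.x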

tensorlayerx/nn/core/common.py

Lines changed: 110 additions & 1 deletion

@@ -6,6 +6,7 @@
 from tensorlayerx.files import utils
 from tensorlayerx import logging
 import numpy as np
+from queue import Queue
 from tensorlayerx.nn.initializers import *

 if tlx.BACKEND == 'mindspore':
@@ -445,4 +446,112 @@ def tolist(tensors):
             ntensors += tolist(t)
         return ntensors
     else:
-        return [tensors]
+        return [tensors]
+
+
+def construct_graph(inputs, outputs):
+    """Construct the computation graph for a model using ModuleNode objects."""
+    all_layers = []
+    node_by_depth = []
+
+    input_tensors_list = inputs if isinstance(inputs, list) else [inputs]
+
+    queue_node = Queue()
+    # BFS to visit all nodes that should be involved in the computation graph
+    output_tensors_list = outputs if isinstance(outputs, list) else [outputs]
+    output_nodes = [tensor._info[0] for tensor in output_tensors_list]
+
+    visited_node_names = set()
+    for out_node in output_nodes:
+        if out_node.visited:
+            continue
+        queue_node.put(out_node)
+
+        while not queue_node.empty():
+            cur_node = queue_node.get()
+            in_nodes = cur_node.in_nodes
+
+            for node in in_nodes:
+                node.out_nodes.append(cur_node)
+                if not node.visited:
+                    queue_node.put(node)
+                    node.visited = True
+                    if node.node_name not in visited_node_names:
+                        visited_node_names.add(node.node_name)
+                    # else multiple layers share the same name
+                    else:
+                        raise ValueError(
+                            'Layer name \'%s\' has already been used by another layer. Please change the layer name.'
+                            % node.layer.name
+                        )
+
+    # construct the computation graph in topological-sort order
+    cur_depth = [tensor._info[0] for tensor in input_tensors_list]
+    next_depth = []
+    indegrees = {}
+
+    visited_layer_names = []
+    while not len(cur_depth) == 0:
+        node_by_depth.append(cur_depth)
+        for node in cur_depth:
+            if node.layer.name not in visited_layer_names:
+                all_layers.append(node.layer)
+                visited_layer_names.append(node.layer.name)
+            for out_node in node.out_nodes:
+                if out_node.node_name not in indegrees.keys():
+                    indegrees[out_node.node_name] = len(out_node.in_nodes)
+                indegrees[out_node.node_name] -= 1
+                if indegrees[out_node.node_name] == 0:
+                    next_depth.append(out_node)
+        cur_depth = next_depth
+        next_depth = []
+    return node_by_depth, all_layers
+
+
+class ModuleNode(object):
+    """
+    The :class:`ModuleNode` class represents a conceptual node for a layer.
+
+    ModuleNode is used for building the topology; it is a lightweight
+    wrapper over Layer.
+
+    Parameters
+    ----------
+    layer : tl.layers.Layer
+        The tl layer that wants to create a node.
+    node_index : int
+        Index of this node in layer._nodes.
+    in_nodes : a list of ModuleNode
+        Parent nodes of this node.
+    in_tensors : a list of tensors
+        Input tensors to this node.
+    out_tensors : a list of tensors
+        Output tensors of this node.
+    in_tensor_idxes : a list of int
+        Indexes of each input tensor in its corresponding node's out_tensors.
+
+    Methods
+    ---------
+    __init__()
+        Initializing the ModuleNode.
+    __call__()
+        (1) Forwarding through the layer. (2) Update its input/output tensors.
+    """
+
+    def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes):
+        self.layer = layer
+        self.node_index = node_index
+        self.in_nodes = in_nodes
+        self.out_nodes = []
+        self.in_tensors = in_tensors
+        self.out_tensors = out_tensors
+        self.node_name = layer.name + "_node_{}".format(node_index)
+
+        self.in_tensors_idxes = in_tensor_idxes
+        self.visited = False
+
+    def __call__(self, inputs, **kwargs):
+        """(1) Forwarding through the layer. (2) Update its input/output tensors."""
+        outputs = self.layer(inputs, **kwargs)
+        self.in_tensors = tolist(inputs)
+        self.out_tensors = tolist(outputs)
+        return self.out_tensors
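The new `construct_graph` works in two phases: a breadth-first walk back from the outputs that records each node's consumers, then a Kahn-style in-degree pass forward from the inputs that groups nodes by depth. Below is a minimal, framework-free sketch of that second phase; the `Node` class is a toy stand-in for `ModuleNode`, not TensorLayerX API.

    # Kahn-style topological layering by in-degree, mirroring the top-sort
    # loop in construct_graph. Node is a toy stand-in for ModuleNode.
    class Node:
        def __init__(self, name):
            self.name = name
            self.in_nodes = []
            self.out_nodes = []

        def feeds(self, other):
            # Wire this node as a parent of `other`, as the BFS phase does.
            other.in_nodes.append(self)
            self.out_nodes.append(other)
            return other

    def layers_by_depth(inputs):
        """Group nodes into depths so each depth depends only on earlier ones."""
        indegrees = {}
        node_by_depth = []
        cur_depth = list(inputs)
        while cur_depth:
            node_by_depth.append(cur_depth)
            next_depth = []
            for node in cur_depth:
                for out_node in node.out_nodes:
                    # First visit: record how many producers out_node waits on.
                    indegrees.setdefault(out_node.name, len(out_node.in_nodes))
                    indegrees[out_node.name] -= 1
                    if indegrees[out_node.name] == 0:
                        next_depth.append(out_node)
            cur_depth = next_depth
        return node_by_depth

    a, b, c, d = Node('in'), Node('conv'), Node('bn'), Node('out')
    a.feeds(b).feeds(c).feeds(d)
    a.feeds(d)  # skip connection: 'out' must wait for both 'bn' and 'in'
    print([[n.name for n in depth] for depth in layers_by_depth([a])])
    # -> [['in'], ['conv'], ['bn'], ['out']]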

tensorlayerx/nn/core/core_mindspore.py

Lines changed: 69 additions & 42 deletions

@@ -1,11 +1,11 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-

-from .common import str2act, str2init, random_normal
+from .common import str2act, str2init, random_normal, tolist, construct_graph, ModuleNode
 from .common import _save_weights, _load_weights, _save_standard_weights_dict, _load_standard_weights_dict
 from mindspore.nn import Cell
 import tensorlayerx as tlx
-from collections import OrderedDict
+import mindspore as ms
 from mindspore import log as logger
 import inspect
 from mindspore import context
@@ -16,6 +16,7 @@
 __all__ = ['Module', 'Sequential', 'ModuleList', 'ModuleDict']

 _global_layer_name_dict = {}
+_global_layer_node = []


 class Module(Cell):
@@ -138,41 +139,40 @@ def _compute_shape(tensors):
         shape_mem = tlx.get_tensor_shape(tensors)
         return shape_mem

-    def __call__(self, *inputs, **kwargs):
+    def __call__(self, *args, **kwargs):
         if self.__class__.construct is Cell.construct:
-            logger.warning(
-                f"The '{self.__class__}' does not override the method 'construct', "
-                f"will call the super class(Cell) 'construct'."
-            )
+            logger.warning(f"The '{self.__class__}' does not override the method 'construct', "
+                           f"will call the super class(Cell) 'construct'.")
         if kwargs:
-            bound_args = inspect.signature(self.construct).bind(*inputs, **kwargs)
-            inputs = bound_args.args
-            kwargs = bound_args.kwargs
+            bound_arguments = inspect.signature(self.construct).bind(*args, **kwargs)
+            bound_arguments.apply_defaults()
+            args = bound_arguments.args
+            kwargs = bound_arguments.kwargs

         # Run in Graph mode.
-        if context.get_context("mode") == context.GRAPH_MODE:
-            raise NotImplemented("GRAPH MODE is not supported, please select PYNATIVE MODE.")
+        if context._get_mode() == context.GRAPH_MODE:
+            self._check_construct_args(*args, **kwargs)
+            if self.enable_hook:
+                raise ValueError("For 'Cell', it's not support hook function in graph mode, please use "
+                                 "context.set_context to set pynative mode.")
+            out = self.compile_and_run(*args)
+            return out

         # Run in PyNative mode.
         if _pynative_executor.is_top_cell():
             _pynative_executor.set_lazy_build(True)
             # There are many Casts in parameter_broadcast. Enable lazy_build to build faster.
             self._do_parameter_broadcast()

-        for item in inputs:
-            if isinstance(item, numpy.ndarray):
-                raise TypeError("The cell inputs should not be numpy arrays.")
+        for item in args:
+            if isinstance(item, ms.Tensor) and item.has_init:
+                item.init_data()
+            elif isinstance(item, numpy.ndarray):
+                raise TypeError("For 'Cell', inputs should not be numpy array.")
         if self.requires_grad is True:
             _pynative_executor.set_grad_flag(True)
-        _pynative_executor.new_graph(self, *inputs, **kwargs)
-        cast_inputs = list()
-        if hasattr(self, "_mindspore_flags"):
-            if self._mindspore_flags.get('fp16'):
-                cast_inputs = self._cast_mixed_precision_inputs(inputs, tlx.float16)
-            if self._mindspore_flags.get('fp32'):
-                cast_inputs = self._cast_mixed_precision_inputs(inputs, tlx.float32)
-            if not cast_inputs:
-                cast_inputs = inputs
+        _pynative_executor.new_graph(self, *args, **kwargs)
+        cast_inputs = self.auto_cast_inputs(args)

         with self.CellGuard():
             try:
@@ -182,29 +182,13 @@ def __call__(self, *inputs, **kwargs):
                 raise err

         if _pynative_executor.is_top_cell():
-            _pynative_executor.execute_all_task()
+            _pynative_executor.execute_lazy_task()

         if isinstance(output, Parameter):
             output = output.data
-        _pynative_executor.end_graph(self, output, *inputs, **kwargs)
+        _pynative_executor.end_graph(self, output, *args, **kwargs)
         return output

-    def _add_node(self, input_tensors, output_tensors):
-        """Add a LayerNode for this layer given input_tensors, output_tensors.
-
-        WARINING: This function should not be called from outside, it should only be called
-        in layer.__call__ when building static model.
-
-        Parameters
-        ----------
-        input_tensors : Tensor or a list of tensors
-            Input tensors to this layer.
-        output_tensors : Tensor or a list of tensors
-            Output tensors to this layer.
-
-        """
-        raise NotImplementedError
-
     def set_train(self):
         """
         Sets the cell to training mode.
@@ -310,6 +294,49 @@ def init_build(self, *inputs, **kwargs):

         self.forward(*inputs, **kwargs)

+    def build_graph(self, *inputs, **kwargs):
+        # Add nodes only when the composition is needed.
+        layers = self.cells_and_names(name_prefix='')
+        for layer_name, layer in layers:
+            if isinstance(layer, Module):
+                layer._build_graph = True
+
+        outputs = self.forward(*inputs, **kwargs)
+        self.inputs = inputs
+        self.outputs = outputs
+        self._node_by_depth, self._all_layers = construct_graph(self.inputs, self.outputs)
+        return self._node_by_depth, self._all_layers
+
+    def _add_node(self, input_tensors, output_tensors):
+        """Add a ModuleNode for this layer given input_tensors, output_tensors.
+
+        This function should not be called from outside; it should only be called
+        in __call__ when building a static model.
+
+        Parameters
+        ----------
+        input_tensors : Tensor or a list of tensors
+            Input tensors to this layer.
+        output_tensors : Tensor or a list of tensors
+            Output tensors to this layer.
+
+        """
+        inputs_list = tolist(input_tensors)
+        outputs_list = tolist(output_tensors)
+        if self.__class__.__name__ in tlx.layers.inputs.__all__:
+            # for InputLayer, there should be no in_nodes
+            in_nodes = []
+            in_tensor_idxes = [0]
+        else:
+            in_nodes = [tensor._info[0] for tensor in inputs_list]
+            in_tensor_idxes = [tensor._info[1] for tensor in inputs_list]
+        node_index = len(_global_layer_node)
+
+        new_node = ModuleNode(self, node_index, in_nodes, inputs_list, outputs_list, in_tensor_idxes)
+        _global_layer_node.append(new_node)
+        for idx, tensor in enumerate(outputs_list):
+            tensor._info = (new_node, idx)
+

 class Sequential(Module):
     """

tensorlayerx/nn/core/core_paddle.py

Lines changed: 49 additions & 1 deletion

@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-

 import copy, six
-from .common import str2act, str2init
+from .common import str2act, str2init, tolist, construct_graph, ModuleNode
 from .common import _save_weights, _load_weights, _save_standard_weights_dict, _load_standard_weights_dict
 from paddle.fluid import framework
 from paddle.fluid.dygraph import Layer
@@ -12,7 +12,11 @@
 import paddle as pd
 from collections import OrderedDict
 from collections import OrderedDict, abc as container_abcs
+import tensorlayerx as tlx
+from queue import Queue
+
 _global_layer_name_dict = {}
+_global_layer_node = []

 __all__ = ['Module', 'Sequential', 'ModuleList', 'ModuleDict']

@@ -305,6 +309,50 @@ def insert_child_to_layer(self, child_name, child):
             raise TypeError("Child layer type is incorrect.")
         self._sub_layers[child_name] = child

+    def build_graph(self, *inputs, **kwargs):
+        # Add nodes only when the composition is needed.
+        # for layer in self.sublayers():
+        #     if isinstance(layer, Module):
+        #         layer._build_graph = True
+        #
+        # outputs = self.forward(*inputs, **kwargs)
+        # self.inputs = inputs
+        # self.outputs = outputs
+        # self._node_by_depth, self._all_layers = construct_graph(self.inputs, self.outputs)
+        # return self._node_by_depth, self._all_layers
+        raise NotImplementedError
+
+    def _add_node(self, input_tensors, output_tensors):
+        """Add a ModuleNode for this layer given input_tensors, output_tensors.
+
+        This function should not be called from outside; it should only be called
+        in __call__ when building a static model.
+
+        Parameters
+        ----------
+        input_tensors : Tensor or a list of tensors
+            Input tensors to this layer.
+        output_tensors : Tensor or a list of tensors
+            Output tensors to this layer.
+
+        """
+        pass
+        # inputs_list = tolist(input_tensors)
+        # outputs_list = tolist(output_tensors)
+        # if self.__class__.__name__ in tlx.layers.inputs.__all__:
+        #     # for InputLayer, there should be no in_nodes
+        #     in_nodes = []
+        #     in_tensor_idxes = [0]
+        # else:
+        #     in_nodes = [tensor[0] for tensor in inputs_list]
+        #     in_tensor_idxes = [tensor[1] for tensor in inputs_list]
+        # node_index = len(_global_layer_node)
+        #
+        # new_node = ModuleNode(self, node_index, in_nodes, inputs_list, outputs_list, in_tensor_idxes)
+        # _global_layer_node.append(new_node)
+        # for idx, tensor in enumerate(outputs_list):
+        #     tensor._info = (new_node, idx)
+

 class Sequential(Module):
