
Commit eabe79e

[Fix] graph support 0-Size tensor (#6957)
* Add nn.functional.glu graph test
* add filter to modify functional autotest
* modify code
* add test example
* add test else
* add test judging condition for test_masked_fill.py, test_constant.py, test_tile.py, test_repeat.py, test_expand.py
* add test ok example
* Clear tensor name scope after graph build
* Add test case of 2 graphs catching the same free eager tensor
* auto format by CI
* Dev cc clean tensor name scope (#7082)
  * Clear tensor name scope after graph build
  * Add test case of 2 graphs catching the same free eager tensor
  * auto format by CI
  Co-authored-by: chengtbf <472491134@qq.com>
  Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
* submit test success example
* test success example
* submit test code
* fix a bug about relu module with 0 shape data
* fixed a bug about relu module with 0 shape data
* fix a bug about relu module with 0 shape data
* fix a bug about relu module with 0 shape data
* 0shape and 0d autotest
* fix a bug about relu module with 0 shape data
* 0shape changed to 0_size
* modify test_var.py
* modify test_eye.py
* modify test_reshape.py
* modify test_.py
* modify ReshapeFunctor
* modify some files
* Fixed graph autotest bug with reshape op test
* Fixed graph autotest bug with reshape op test
* fixed test_sub.py
* modify test_sub.py
* modify tensor_methods.cpp
* modify array_functor.cpp
* graph support 0-Size tensor
* rename 0shape to 0 size
* modified check_graph=True
* fix and refine

Co-authored-by: Zhenhua <huangzhenhua@zhejianglab.com>
Co-authored-by: tangnana925 <85614052+tangnana925@users.noreply.github.com>
Co-authored-by: tangnana <tnn_personal@163.com>
Co-authored-by: Zhenhua <1209435+hengzi@users.noreply.github.com>
Co-authored-by: chengtbf <472491134@qq.com>
Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
Co-authored-by: Xiaoyu Xu <xiaoyulink@gmail.com>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
1 parent de9fc41 · commit eabe79e

28 files changed (+60, -60 lines)
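
The net effect of this change set is that 0-size tensors (tensors with a 0 in their shape) can now run through nn.Graph and produce the same result as eager mode, which is why the test decorators below flip back to check_graph=True. A minimal sketch of that behavior, not part of the diff and assuming a OneFlow build containing this commit:

import numpy as np
import oneflow as flow

class ReluGraph(flow.nn.Graph):
    def __init__(self):
        super().__init__()
        self.relu = flow.nn.ReLU()

    def build(self, x):
        return self.relu(x)

x = flow.randn(2, 3, 0, 3)            # 0-size tensor: one dimension is 0
eager_y = flow.nn.ReLU()(x)           # eager execution
graph_y = ReluGraph()(x)              # lazy execution through nn.Graph

assert tuple(eager_y.shape) == tuple(graph_y.shape) == (2, 3, 0, 3)
assert np.array_equal(eager_y.numpy(), graph_y.numpy())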

oneflow/core/functional/impl/array_functor.cpp

Lines changed: 1 addition & 1 deletion
@@ -911,7 +911,7 @@ class ReshapeFunctor {
   }
   Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& x, const Shape& shape) const {
     // if input tensor is eager local, than return tensor's view
-    if (x->is_eager() && x->is_local()) { return view::Reshape(x, shape); }
+    if (x->is_local() && !(LazyMode::is_enabled())) { return view::Reshape(x, shape); }
     int need_infer_axis = -1;
     size_t count = 1;
     for (int i = 0; i < shape.NumAxes(); ++i) {
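
The view shortcut is now keyed off the global LazyMode flag rather than the tensor's own eager flag, so a free eager local tensor captured while a graph is being traced falls through to the op-based reshape instead of building a view. A rough sketch of the eager-side behavior this preserves, not from the diff and assuming in-place index assignment is available:

import oneflow as flow

x = flow.zeros(2, 3)
y = flow.reshape(x, (3, 2))    # eager + local -> view of x's storage
y[0, 0] = 1.0                  # in-place write through the view
assert x[0, 0].numpy() == 1.0  # visible through the original tensor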

oneflow/core/job/plan_util.cpp

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ void GenChunkForMultiNNGraphMemoryReuseInMultiClient(
     CHECK_LE(current_chunk_offset + mem_block->mem_size(), chunk->mem_size());
     CHECK_GE(current_chunk_offset, 0);
     // CHECK_GT(mem_block->mem_size(), 0); NOTE(chengcheng): has mem block mem size = 0
-    CHECK_GT(chunk->mem_size(), 0);
+    CHECK_GE(chunk->mem_size(), 0);
     mem_block->set_chunk_id(chunk->chunk_id());
     mem_block->set_chunk_offset(current_chunk_offset);
     current_chunk_offset += mem_block->mem_size();

oneflow/core/operator/interface_op_util.cpp

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ namespace oneflow {
 namespace {
 
 void CheckShape(const Shape& shape) {
-  FOR_RANGE(int, i, 1, shape.NumAxes()) { CHECK_GT(shape.At(i), 0); }
+  FOR_RANGE(int, i, 1, shape.NumAxes()) { CHECK_GE(shape.At(i), 0); }
 }
 
 Maybe<void> GetSbpSignature(const InterfaceBlobConf& blob_conf, const PbRpf<std::string>& input_bns,

python/oneflow/test/modules/test_abs.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
 @flow.unittest.skip_unless_1n1d()
 class TestAbsModule(flow.unittest.TestCase):
     @autotest(check_graph=True)
-    def test_abs_with_0shape_data(test_case):
+    def test_abs_with_0_size_data(test_case):
         device = random_device()
         x = random_pytorch_tensor().to(device)
         y = torch.abs(x)

python/oneflow/test/modules/test_activation.py

Lines changed: 12 additions & 12 deletions
@@ -39,8 +39,8 @@ def test_relu_module_with_random_data(test_case):
         y = m(x)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_relu_module_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_relu_module_with_0_size_data(test_case):
         m = torch.nn.ReLU()
         m.train(random())
         device = random_device()
@@ -62,8 +62,8 @@ def test_relu6_module_with_random_data(test_case):
         y = m(x)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_relu6_module_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_relu6_module_with_0_size_data(test_case):
         m = torch.nn.ReLU6()
         m.train(random())
         device = random_device()
@@ -85,8 +85,8 @@ def test_tanh_module_with_random_data(test_case):
         y = m(x)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_tanh_module_with_0shapedata(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_tanh_module_with_0_size_data(test_case):
         m = torch.nn.Tanh()
         m.train(random())
         device = random_device()
@@ -102,8 +102,8 @@ def test_flow_tanh_with_random_data(test_case):
         y = torch.tanh(x)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_flow_tanh_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_flow_tanh_with_0_size_data(test_case):
         device = random_device()
         x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
         y = torch.tanh(x)
@@ -122,8 +122,8 @@ def test_elu_module_with_random_data(test_case):
         y = m(x)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_elu_module_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_elu_module_with_0_size_data(test_case):
         m = torch.nn.ELU(alpha=random() | nothing())
         m.train(random())
         device = random_device()
@@ -145,8 +145,8 @@ def test_celu_module_with_random_data(test_case):
         y = m(x)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_celu_module_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_celu_module_with_0_size_data(test_case):
         m = torch.nn.CELU(alpha=random() | nothing())
         m.train(random())
         device = random_device()

python/oneflow/test/modules/test_add.py

Lines changed: 2 additions & 2 deletions
@@ -170,8 +170,8 @@ def test_add(test_case):
         for arg in GenArgList(arg_dict):
             arg[0](test_case, *arg[1:])
 
-    @autotest(check_graph=False)
-    def test_0shape_add(test_case):
+    @autotest(check_graph=True)
+    def test_0_size_add(test_case):
         device = random_device()
         x = random_pytorch_tensor(2, 0, 3).to(device)
         y = random_pytorch_tensor(2, 1, 3).to(device)
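
The add case above mixes a 0-size operand with a broadcastable one. Outside the autotest harness, the expected semantics look like this (a small sketch, assuming a OneFlow build that accepts 0-size shapes):

import oneflow as flow

x = flow.ones(2, 0, 3)   # 0-size operand
y = flow.ones(2, 1, 3)   # broadcasts along dim 1
z = x + y
assert tuple(z.shape) == (2, 0, 3)
assert z.numel() == 0    # the result is still empty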

python/oneflow/test/modules/test_cast.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ def test_cast(test_case):
         for arg in GenArgList(arg_dict):
             arg[0](test_case, *arg[1:])
 
-    def test_cast_with_0shape_data(test_case):
+    def test_cast_with_0_size_data(test_case):
         arg_dict = OrderedDict()
         arg_dict["test_fun"] = [
             _test_cast_float2int,

python/oneflow/test/modules/test_ceil.py

Lines changed: 2 additions & 2 deletions
@@ -32,8 +32,8 @@ def test_ceil_flow_with_random_data(test_case):
         y = torch.ceil(input)
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_ceil_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_ceil_with_0_size_data(test_case):
         device = random_device()
         x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
         y = torch.ceil(x)

python/oneflow/test/modules/test_clamp.py

Lines changed: 2 additions & 2 deletions
@@ -154,8 +154,8 @@ def test_clip_max_none_flow_with_random_data(test_case):
         )
         return y
 
-    @autotest(auto_backward=False, check_graph=False)
-    def test_clamp_with_0shape_data(test_case):
+    @autotest(auto_backward=False, check_graph=True)
+    def test_clamp_with_0_size_data(test_case):
         device = random_device()
         x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
         y = torch.clamp(x, min=random().to(float), max=random().to(float))

python/oneflow/test/modules/test_concat.py

Lines changed: 4 additions & 4 deletions
@@ -140,16 +140,16 @@ def test_cat_with_random_data(test_case):
         x = random_pytorch_tensor(ndim=2, dim0=random(), dim1=random()).to(device)
         return torch.cat((x, x, x), random(0, 2).to(int))
 
-    @autotest(n=10, auto_backward=False, check_graph=False)
-    def test_concat_with_input_0shape_data(test_case):
+    @autotest(n=10, auto_backward=False, check_graph=True)
+    def test_concat_with_input_0_size_data(test_case):
         device = random_device()
         x = random_pytorch_tensor(4, 2, 3, 2, 4).to(device)
         y = random_pytorch_tensor(4, 2, 3, random(0, 3), 4).to(device)
         z = torch.cat((x, y), dim=2)
         return z
 
-    @autotest(n=10, auto_backward=False, check_graph=False)
-    def test_concat_with_output_0shape_data(test_case):
+    @autotest(n=10, auto_backward=False, check_graph=True)
+    def test_concat_with_output_0_size_data(test_case):
         device = random_device()
         x = random_pytorch_tensor(4, 2, 0, 2, 4).to(device)
         y = random_pytorch_tensor(4, 2, 0, 2, 4).to(device)
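
For reference, the concat semantics these two tests exercise (a sketch, not part of the diff, assuming a OneFlow build with this commit): an input whose concat dimension is 0 contributes nothing, and the output itself can be 0-size.

import oneflow as flow

x = flow.ones(2, 3, 2, 4)
y = flow.ones(2, 3, 0, 4)                                    # 0 along the concat dim
assert tuple(flow.cat((x, y), dim=2).shape) == (2, 3, 2, 4)

a = flow.ones(2, 0, 2, 4)
b = flow.ones(2, 0, 2, 4)
assert tuple(flow.cat((a, b), dim=2).shape) == (2, 0, 4, 4)  # output is still 0-size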
