
Commit 85aea96

Reduce the complete storage & reading thresholds for int and float from 1024 to 64 (#180)
* CONTRIBUTE_TUTORIAL_cn.md
* Handle big int tensors by converting to sparse COO
* Update utils
* Update utils
* Update utils
* Update utils
1 parent 58efcc6 commit 85aea96

File tree

1 file changed: +4 −4 lines


graph_net/torch/utils.py

Lines changed: 4 additions & 4 deletions
@@ -52,7 +52,7 @@ def process_tensor(tensor):
 
     info = tensor_info(tensor)
     if tensor.dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
-        if tensor.numel() < 1024:
+        if tensor.numel() < 64:
             return {
                 "type": "small_int_tensor",
                 "data": tensor.clone(),
@@ -65,7 +65,7 @@ def process_tensor(tensor):
                 "max_val": tensor.max().item(),
                 "info": info,
             }
-    elif tensor.numel() < 1024:
+    elif tensor.numel() < 64:
         return {"type": "small_tensor", "data": tensor.clone(), "info": info}
     else:
         return {"type": "random_tensor", "info": info}
@@ -80,7 +80,7 @@ def process_tensor(tensor):
 def handle_named_tensors(tensor):
     info = tensor_info(tensor)
     if tensor.dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
-        if tensor.numel() < 1024:
+        if tensor.numel() < 64:
             return {
                 "info": info,
                 "data": tensor.clone(),
@@ -93,7 +93,7 @@ def handle_named_tensors(tensor):
                 "max_val": tensor.max().item(),
                 "type": "big_int_tensor_by_range",
             }
-    if tensor.numel() < 1024:
+    if tensor.numel() < 64:
         return {"info": info, "data": tensor.clone(), "type": "small_tensor"}
     else:
         return {"info": info, "data": None, "type": "random_tensor"}
