
Commit d4830c8

Merge branch 'develop' into opt_test_compiler_check

2 parents: 70386d1 + f8d61c2

234 files changed (+260692, -9 lines)


graph_net/paddle/test_compiler.py

Lines changed: 4 additions & 4 deletions
@@ -318,12 +318,12 @@ def get_cmp_diff_count(expected_out, compiled_out, atol, rtol):
 
 def test_multi_models(args):
     verified_samples = None
-    if args.verified_samples_path is not None:
-        assert os.path.isfile(args.verified_samples_path)
+    if args.verified_samples_list_path is not None:
+        assert os.path.isfile(args.verified_samples_list_path)
         graphnet_root = path_utils.get_graphnet_root()
         print(f"graphnet_root: {graphnet_root}")
         verified_samples = []
-        with open(args.verified_samples_path, "r") as f:
+        with open(args.verified_samples_list_path, "r") as f:
             for line in f.readlines():
                 verified_samples.append(os.path.join(graphnet_root, line.strip()))
 
@@ -406,7 +406,7 @@ def main(args):
         help="Log prompt for performance log filtering.",
     )
     parser.add_argument(
-        "--verified-samples-path",
+        "--verified-samples-list-path",
         type=str,
         required=False,
         default=None,
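For reference, the renamed --verified-samples-list-path option points at a plain-text file listing verified sample paths, one per line, which test_compiler resolves against the GraphNet root. A minimal standalone sketch of that loading step, mirroring the hunk above (the helper name and the example file path are illustrative, not part of the commit):

import os


def load_verified_samples(list_path, graphnet_root):
    # Each non-empty line is read as a path relative to the GraphNet root.
    verified_samples = []
    with open(list_path, "r") as f:
        for line in f:
            line = line.strip()
            if line:
                verified_samples.append(os.path.join(graphnet_root, line))
    return verified_samples


# e.g. load_verified_samples("verified_samples.txt", "/path/to/GraphNet")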

graph_net/paddle/validate.py

Lines changed: 3 additions & 3 deletions
@@ -49,7 +49,7 @@ def _extract_forward_source(model_path, class_name):
 def check_graph_hash(args):
     model_path = args.model_path
     file_path = f"{model_path}/graph_hash.txt"
-    if args.dump_graph_hash_key:
+    if not args.no_dump_graph_hash_key:
         model_str = _extract_forward_source(model_path, class_name="GraphModule")
         assert model_str is not None, f"model_str of {args.model_path} is None."
         new_hash_text = _get_sha_hash(model_str)
@@ -128,9 +128,9 @@ def main(args):
         help="whether check model graph redundancy",
     )
     parser.add_argument(
-        "--dump-graph-hash-key",
+        "--no-dump-graph-hash-key",
         action="store_true",
-        default=True,
+        default=False,
         help="Dump graph hash key",
     )
     parser.add_argument(
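Note on the flag inversion above: with action="store_true" and default=True, the old --dump-graph-hash-key switch could never actually disable dumping. The new --no-dump-graph-hash-key opt-out keeps dumping as the default, which the `if not args.no_dump_graph_hash_key` check relies on. A minimal argparse sketch of this inverted-flag pattern (standalone, not the project's full parser):

import argparse

parser = argparse.ArgumentParser()
# Dumping the graph hash key stays on by default; pass the flag to skip it.
parser.add_argument(
    "--no-dump-graph-hash-key",
    action="store_true",
    default=False,
    help="Dump graph hash key",
)

args = parser.parse_args([])
assert not args.no_dump_graph_hash_key  # default run: hash key is dumped

args = parser.parse_args(["--no-dump-graph-hash-key"])
assert args.no_dump_graph_hash_key  # explicit opt-out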

graph_net/test/nlp_model_getter.py

Lines changed: 47 additions & 0 deletions
@@ -107,3 +107,50 @@ def get_skep_model_and_inputs(model_name, text, dtype):
     tokenizer = TokenizerClass.from_pretrained(model_name)
     inputs = tokenizer(text, return_tensors="pd")
     return model, inputs
+
+
+def get_bart_model_and_inputs(model_name, text, dtype):
+    from paddlenlp.transformers import BartModel, BartTokenizer
+
+    model = BartModel.from_pretrained(model_name)
+    model.eval()
+
+    tokenizer = BartTokenizer.from_pretrained(model_name)
+
+    inputs = tokenizer(
+        text,
+        return_tensors="pd",
+        padding=True,
+        truncation=True,
+        max_length=512,
+    )
+    inputs.pop("token_type_ids", None)
+
+    return model, inputs
+
+
+def get_xlnet_model_and_inputs(model_name, text, dtype):
+    import paddle
+    from paddlenlp.transformers import XLNetModel, XLNetTokenizer, XLNetConfig
+
+    config = XLNetConfig.from_pretrained(model_name)
+    model = XLNetModel(config)
+    if dtype == "float16":
+        model = model.astype(paddle.float16)
+    model.eval()
+
+    tokenizer = XLNetTokenizer.from_pretrained(model_name)
+
+    enc = tokenizer(
+        text,
+        return_tensors="pd",
+        padding=True,
+        truncation=True,
+        # max_length=512,
+    )
+    if "attention_mask" not in enc:
+        input_ids = enc["input_ids"]
+        pad_id = tokenizer.pad_token_id
+        enc["attention_mask"] = (input_ids != pad_id).astype("int64")
+
+    return model, enc
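A minimal usage sketch for the new getters, assuming a working PaddleNLP installation and that the module imports as graph_net.test.nlp_model_getter; the model name and text below are illustrative, not taken from this commit:

import paddle

from graph_net.test.nlp_model_getter import get_bart_model_and_inputs

# "bart-base" is a placeholder model identifier.
model, inputs = get_bart_model_and_inputs(
    model_name="bart-base",
    text="GraphNet extracts computation graphs from real models.",
    dtype="float32",
)
with paddle.no_grad():
    outputs = model(**inputs)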

graph_net/test_compiler_util.py

Lines changed: 2 additions & 2 deletions
@@ -170,7 +170,7 @@ def tolerance_generator(t):
     yield 10 ** (t * 7 / 5), 10 ** (t * 7 / 5)
 
 
-def compute_tolerance_pair(begin, end):
+def calculate_tolerance_pair(begin, end):
     tolerance_pair_list = []
     for t in range(begin, end + 1):
         for rtol, atol in tolerance_generator(t):
@@ -186,7 +186,7 @@ def compute_tolerance_pair(begin, end):
 
 
 def generate_allclose_configs(cmp_all_close_func):
-    tolerance_pair_list = compute_tolerance_pair(-10, 5)
+    tolerance_pair_list = calculate_tolerance_pair(-10, 5)
 
     cmp_configs = []
     for pair in tolerance_pair_list:
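From the visible hunks, tolerance_generator(t) yields matching (rtol, atol) pairs on a 10 ** (t * 7 / 5) scale, and the renamed calculate_tolerance_pair collects them over an exponent range (generate_allclose_configs uses -10 through 5). A sketch of that collection step under those assumptions; the lines the diff truncates may differ:

def calculate_tolerance_pair(begin, end):
    # Gather every (rtol, atol) pair the generator produces per exponent step.
    tolerance_pair_list = []
    for t in range(begin, end + 1):
        for rtol, atol in tolerance_generator(t):
            tolerance_pair_list.append((rtol, atol))
    return tolerance_pair_list


# For t = -5 the visible yield gives rtol = atol = 10 ** (-5 * 7 / 5) = 1e-07.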
Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+{
+    "framework": "paddle",
+    "model_name": "albert-base-v1",
+    "num_devices_required": 1,
+    "num_nodes_required": 1
+}
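This new metadata records the target framework, model name, and device/node requirements for the sample. A minimal sketch of reading it back (the new file's on-disk name is not shown in this view, so the path below is a placeholder):

import json

# "graph_meta.json" is a placeholder path; the commit view does not show where the file lives.
with open("graph_meta.json", "r") as f:
    meta = json.load(f)

assert meta["framework"] == "paddle"
assert meta["model_name"] == "albert-base-v1"
print(meta["num_devices_required"], meta["num_nodes_required"])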
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+class Program_weight_tensor_data_0:
+    name = "data_0"
+    shape = [1, 21]
+    dtype = "int64"
+    data = [
+        2,
+        10975,
+        15,
+        51,
+        204,
+        25,
+        1909,
+        9,
+        31,
+        589,
+        2477,
+        88,
+        370,
+        816,
+        2761,
+        17,
+        66,
+        2607,
+        18,
+        9,
+        3,
+    ]
+
+
+class Program_weight_tensor_data_1:
+    name = "data_1"
+    shape = [1, 21]
+    dtype = "int64"
+    data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+
+class Program_weight_tensor_data_2:
+    name = "data_2"
+    shape = [1, 21]
+    dtype = "int64"
+    data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
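Each of the new classes records a tensor name, shape, dtype, and flattened data for the albert-base-v1 sample inputs. A minimal sketch of rebuilding Paddle tensors from such a spec (the helper is illustrative; how GraphNet itself consumes these classes is not shown in this commit):

import numpy as np
import paddle


def to_paddle_tensor(spec):
    # Reshape the flat data list into the recorded shape with the recorded dtype.
    array = np.array(spec.data, dtype=spec.dtype).reshape(spec.shape)
    return paddle.to_tensor(array)


input_ids = to_paddle_tensor(Program_weight_tensor_data_0)  # shape [1, 21], int64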
