
Commit 03679e2

[few-shot] add encoding (#4457)

1 parent: cf712a8
File tree: 3 files changed, +39 -39 lines changed

applications/text_classification/hierarchical/few-shot/infer.py (13 additions, 13 deletions)

@@ -12,19 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-import six
-import json
-import psutil
 import argparse
+import json
+import os

 import numpy as np
+import onnxruntime as ort
+import paddle2onnx
+import psutil
+import six

-from paddlenlp.utils.log import logger
 from paddlenlp.prompt import AutoTemplate, PromptDataCollatorWithPadding
-from paddlenlp.transformers import AutoTokenizer, AutoModelForMaskedLM
-import paddle2onnx
-import onnxruntime as ort
+from paddlenlp.transformers import AutoModelForMaskedLM, AutoTokenizer
+from paddlenlp.utils.log import logger

 # yapf: disable
 parser = argparse.ArgumentParser()
@@ -63,16 +63,16 @@ def __init__(self, model_path_prefix, device="cpu", device_id=0, use_fp16=False,
         )
         infer_model_dir = model_path_prefix.rsplit("/", 1)[0]
         float_onnx_file = os.path.join(infer_model_dir, "model.onnx")
-        with open(float_onnx_file, "wb") as f:
+        with open(float_onnx_file, "wb", encoding="utf-8") as f:
             f.write(onnx_model)

         if device == "gpu":
             logger.info(">>> [InferBackend] Use GPU to inference ...")
             providers = ["CUDAExecutionProvider"]
             if use_fp16:
                 logger.info(">>> [InferBackend] Use FP16 to inference ...")
-                from onnxconverter_common import float16
                 import onnx
+                from onnxconverter_common import float16

                 fp16_model_file = os.path.join(infer_model_dir, "fp16_model.onnx")
                 onnx_model = onnx.load_model(float_onnx_file)
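
For context, the two reordered imports feed the FP16 conversion that follows this hunk. A minimal sketch of that conversion step, assuming onnxconverter_common's convert_float_to_float16 helper (the diff shows only the start of the real block, so the rest here is illustrative, not the file's exact body):

import onnx
from onnxconverter_common import float16

# Load the exported float32 graph and convert its weights to float16.
# keep_io_types leaves the graph inputs/outputs as float32, so callers
# do not need to change the dtype of the feed dict.
onnx_model = onnx.load_model("model.onnx")
fp16_model = float16.convert_float_to_float16(onnx_model, keep_io_types=True)
onnx.save_model(fp16_model, "fp16_model.onnx")
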
@@ -97,7 +97,7 @@ def __init__(self, model_path_prefix, device="cpu", device_id=0, use_fp16=False,
             assert "CUDAExecutionProvider" in self.predictor.get_providers()
         except AssertionError:
             raise AssertionError(
-                f"The environment for GPU inference is not set properly. "
+                "The environment for GPU inference is not set properly. "
                 "A possible cause is that you had installed both onnxruntime and onnxruntime-gpu. "
                 "Please run the following commands to reinstall: \n "
                 "1) pip uninstall -y onnxruntime onnxruntime-gpu \n 2) pip install onnxruntime-gpu"
@@ -130,7 +130,7 @@ def __init__(self, args):
     def post_init(self):
         export_path = os.path.dirname(self.args.model_path_prefix)
         template_path = os.path.join(export_path, "template_config.json")
-        with open(template_path, "r") as fp:
+        with open(template_path, "r", encoding="utf-8") as fp:
             prompt = json.load(fp)
         template = AutoTemplate.create_from(prompt, self.tokenizer, self.args.max_length, self.model)
         keywords = template.extract_template_keywords(template.prompt)
@@ -140,7 +140,7 @@ def post_init(self):
         if "encoder" in keywords:
             inputs.append("encoder_ids")
         verbalizer_path = os.path.join(export_path, "verbalizer_config.json")
-        with open(verbalizer_path, "r") as fp:
+        with open(verbalizer_path, "r", encoding="utf-8") as fp:
             label_words = json.load(fp)
         labels = sorted(list(label_words.keys()))
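
One caveat on the changes in this file: the encoding argument of open() applies only to text mode. Pinning UTF-8 on the two JSON reads ("r" mode) is the substantive fix, because a bare open() falls back to the locale-dependent default encoding. The "wb" write of the ONNX bytes is binary mode, though, and CPython rejects an encoding argument there at runtime. A minimal sketch of both behaviors (file names are illustrative):

# Text mode: without encoding=, open() uses locale.getpreferredencoding(False),
# which differs across machines; pinning utf-8 makes the I/O reproducible.
with open("template_config.json", "w", encoding="utf-8") as fp:
    fp.write("{}")

# Binary mode: passing encoding= raises immediately.
try:
    open("model.onnx", "wb", encoding="utf-8")
except ValueError as err:
    print(err)  # binary mode doesn't take an encoding argument
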

applications/text_classification/multi_class/few-shot/infer.py (13 additions, 13 deletions)

@@ -12,19 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-import six
-import json
-import psutil
 import argparse
+import json
+import os

 import numpy as np
+import onnxruntime as ort
+import paddle2onnx
+import psutil
+import six

-from paddlenlp.utils.log import logger
 from paddlenlp.prompt import AutoTemplate, PromptDataCollatorWithPadding
-from paddlenlp.transformers import AutoTokenizer, AutoModelForMaskedLM
-import paddle2onnx
-import onnxruntime as ort
+from paddlenlp.transformers import AutoModelForMaskedLM, AutoTokenizer
+from paddlenlp.utils.log import logger

 # yapf: disable
 parser = argparse.ArgumentParser()
@@ -63,16 +63,16 @@ def __init__(self, model_path_prefix, device="cpu", device_id=0, use_fp16=False,
         )
         infer_model_dir = model_path_prefix.rsplit("/", 1)[0]
         float_onnx_file = os.path.join(infer_model_dir, "model.onnx")
-        with open(float_onnx_file, "wb") as f:
+        with open(float_onnx_file, "wb", encoding="utf-8") as f:
             f.write(onnx_model)

         if device == "gpu":
             logger.info(">>> [InferBackend] Use GPU to inference ...")
             providers = ["CUDAExecutionProvider"]
             if use_fp16:
                 logger.info(">>> [InferBackend] Use FP16 to inference ...")
-                from onnxconverter_common import float16
                 import onnx
+                from onnxconverter_common import float16

                 fp16_model_file = os.path.join(infer_model_dir, "fp16_model.onnx")
                 onnx_model = onnx.load_model(float_onnx_file)
@@ -98,7 +98,7 @@ def __init__(self, model_path_prefix, device="cpu", device_id=0, use_fp16=False,
             assert "CUDAExecutionProvider" in self.predictor.get_providers()
         except AssertionError:
             raise AssertionError(
-                f"The environment for GPU inference is not set properly. "
+                "The environment for GPU inference is not set properly. "
                 "A possible cause is that you had installed both onnxruntime and onnxruntime-gpu. "
                 "Please run the following commands to reinstall: \n "
                 "1) pip uninstall -y onnxruntime onnxruntime-gpu \n 2) pip install onnxruntime-gpu"
@@ -131,7 +131,7 @@ def __init__(self, args):
     def post_init(self):
         export_path = os.path.dirname(self.args.model_path_prefix)
         template_path = os.path.join(export_path, "template_config.json")
-        with open(template_path, "r") as fp:
+        with open(template_path, "r", encoding="utf-8") as fp:
             prompt = json.load(fp)
         template = AutoTemplate.create_from(prompt, self.tokenizer, self.args.max_length, self.model)
         keywords = template.extract_template_keywords(template.prompt)
@@ -143,7 +143,7 @@ def post_init(self):
         if "encoder" in keywords:
             inputs.append("encoder_ids")
         verbalizer_path = os.path.join(export_path, "verbalizer_config.json")
-        with open(verbalizer_path, "r") as fp:
+        with open(verbalizer_path, "r", encoding="utf-8") as fp:
             label_words = json.load(fp)
         labels = sorted(list(label_words.keys()))
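
The same three changes repeat in this file. As a side note on how such call sites can be found: since Python 3.10, running the interpreter with -X warn_default_encoding (or PYTHONWARNDEFAULTENCODING=1) emits an EncodingWarning for every text-mode open() that relies on the locale default encoding, which is one way to locate the opens this commit pins to UTF-8. A small self-contained sketch:

# Run with: python -X warn_default_encoding demo.py
with open("demo.json", "w", encoding="utf-8") as fp:  # explicit encoding: no warning
    fp.write("{}")

with open("demo.json") as fp:  # locale default: emits EncodingWarning
    fp.read()
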

applications/text_classification/multi_label/few-shot/infer.py (13 additions, 13 deletions)

@@ -12,19 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-import six
-import json
-import psutil
 import argparse
+import json
+import os

 import numpy as np
+import onnxruntime as ort
+import paddle2onnx
+import psutil
+import six

-from paddlenlp.utils.log import logger
 from paddlenlp.prompt import AutoTemplate, PromptDataCollatorWithPadding
-from paddlenlp.transformers import AutoTokenizer, AutoModelForMaskedLM
-import paddle2onnx
-import onnxruntime as ort
+from paddlenlp.transformers import AutoModelForMaskedLM, AutoTokenizer
+from paddlenlp.utils.log import logger

 # yapf: disable
 parser = argparse.ArgumentParser()
@@ -63,16 +63,16 @@ def __init__(self, model_path_prefix, device="cpu", device_id=0, use_fp16=False,
         )
         infer_model_dir = model_path_prefix.rsplit("/", 1)[0]
         float_onnx_file = os.path.join(infer_model_dir, "model.onnx")
-        with open(float_onnx_file, "wb") as f:
+        with open(float_onnx_file, "wb", encoding="utf-8") as f:
             f.write(onnx_model)

         if device == "gpu":
             logger.info(">>> [InferBackend] Use GPU to inference ...")
             providers = ["CUDAExecutionProvider"]
             if use_fp16:
                 logger.info(">>> [InferBackend] Use FP16 to inference ...")
-                from onnxconverter_common import float16
                 import onnx
+                from onnxconverter_common import float16

                 fp16_model_file = os.path.join(infer_model_dir, "fp16_model.onnx")
                 onnx_model = onnx.load_model(float_onnx_file)
@@ -98,7 +98,7 @@ def __init__(self, model_path_prefix, device="cpu", device_id=0, use_fp16=False,
             assert "CUDAExecutionProvider" in self.predictor.get_providers()
         except AssertionError:
             raise AssertionError(
-                f"The environment for GPU inference is not set properly. "
+                "The environment for GPU inference is not set properly. "
                 "A possible cause is that you had installed both onnxruntime and onnxruntime-gpu. "
                 "Please run the following commands to reinstall: \n "
                 "1) pip uninstall -y onnxruntime onnxruntime-gpu \n 2) pip install onnxruntime-gpu"
@@ -131,7 +131,7 @@ def __init__(self, args):
     def post_init(self):
         export_path = os.path.dirname(self.args.model_path_prefix)
         template_path = os.path.join(export_path, "template_config.json")
-        with open(template_path, "r") as fp:
+        with open(template_path, "r", encoding="utf-8") as fp:
             prompt = json.load(fp)
         template = AutoTemplate.create_from(prompt, self.tokenizer, self.args.max_length, self.model)
         keywords = template.extract_template_keywords(template.prompt)
@@ -141,7 +141,7 @@ def post_init(self):
         if "encoder" in keywords:
             inputs.append("encoder_ids")
         verbalizer_path = os.path.join(export_path, "verbalizer_config.json")
-        with open(verbalizer_path, "r") as fp:
+        with open(verbalizer_path, "r", encoding="utf-8") as fp:
             label_words = json.load(fp)
         labels = sorted(list(label_words.keys()))

0 commit comments