-
Notifications
You must be signed in to change notification settings - Fork 11
Open
Description
mymodel/main.py 中的主循环:
try:
prompt = input()
anchor = prompt.find("->")
if anchor == -1:
print(
"Error: Invalid conversation format, must contains ->, but {}".format(
prompt
),
flush=True,
)
continue
prefix = prompt[:anchor].strip() + "->"
conversation = json.loads(prompt[anchor + 2 :])
assert "prompt" in conversation
print("input is:{}".format(prompt))
prompt = conversation.pop("prompt")
# Inference: Generation of the output text and audio
audio_file = ""
for content in prompt:
if content["role"] == "user":
for line in content["contents"]:
if line["type"] == "audio":
audio_file = line["value"]
# Inference: Generation of the output text and audio
audio, text = generate_from_wav(
audio_file,
model,
codec_decoder,
dataset_config,
decode_config,
logger,
device,
model_config,
tone_dir,
audio_prompt_path,
layer_shift=layer_shift
)
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
sf.write(
f.name,
audio.reshape(-1).detach().cpu().numpy(),
samplerate=24000,
)
retry = 3
while retry:
retry -= 1
print(
prefix + json.dumps({"text": text[0], "audio": f.name}),
flush=True,
)
rlist, _, _ = select.select([sys.stdin], [], [], 1)
if rlist:
finish = sys.stdin.readline().strip()
if finish == "{}close".format(prefix):
break
print("not found close signal, will emit again", flush=True)
except Exception as e:
import traceback
traceback.print_exc()
print("Error:" + str(e))
模型信息:
class MyModel(OfflineModel):
def __init__(
self,
path: str,
sample_params: Dict = None,
*args,
**kwargs,
):
self.command_args = {
"path": path,
}
self.command_args["speech"] = ""
super().__init__(is_chat=True, sample_params=sample_params)
def _inference(self, prompt: PromptStruct, **kwargs):
import uuid
uid = str(uuid.uuid4())
prefix = f"{uid}->"
input_o = {"prompt": prompt}
input_o.update(kwargs)
while True:
_, wlist, _ = select.select([], [self.process.stdin], [], 60)
if wlist:
self.process.stdin.write(f"{prefix}{json.dumps(input_o)}\n")
self.process.stdin.flush()
print("already write in")
break
while True:
reads, _, _ = select.select(
[self.process.stdout, self.process.stderr], [], [], 1.0
)
for read in reads:
if read is self.process.stdout:
result = self.process.stdout.readline()
if result:
if result.startswith(prefix):
self.process.stdin.write("{}close\n".format(prefix))
self.process.stdin.flush()
res = json.loads(result[len(prefix) :])
if len(res) == 1:
return res["text"]
return json.dumps(res, ensure_ascii=False)
elif result.startswith("Error:"):
raise RuntimeError(
"mimicpm-o 2.6 failed: {}".format(result)
)
else:
logger.info(result)
if read is self.process.stderr:
error_output = self.process.stderr.readline()
if error_output:
print(f"stderr: {error_output.strip()}")
报错信息:Traceback (most recent call last):\n File \"/UltraEval-Audio/audio_evals/eval_task.py\", line 75, in _run\n score, ans = self._eval(\n File \"/UltraEval-Audio/audio_evals/eval_task.py\", line 52, in _eval\n output = self.predictor.inference(prompt)\n File \"/UltraEval-Audio/audio_evals/models/model.py\", line 75, in inference\n return super().inference(prompt, **kwargs)\n File \"/UltraEval-Audio/audio_evals/models/model.py\", line 38, in inference\n return self._inference(prompt, **sample_params)\n File \"/UltraEval-Audio/audio_evals/models/my_model.py\", line 49, in _inference\n self.process.stdin.flush()\nBrokenPipeError: [Errno 32] Broken pipe\n
Metadata
Metadata
Assignees
Labels
No labels