Skip to content

Visualize Ham2Pose predictions #4

@m7mdhka

Description

@m7mdhka

Hello,

How can I save the Ham2Pose model predictions in the .pose format so that I can use them with pose_to_video?

def pred(model, dataset, output_dir, gen_k=30, vis=True, subset=None, fps=25):
    """Generate pose predictions for (a subset of) *dataset* using *model*.

    Args:
        model: trained Ham2Pose model exposing ``forward(text=..., first_pose=...)``
            (returning a step iterator) and a ``num_steps`` attribute.
        dataset: indexable collection of datum dicts with ``"id"``, ``"text"``
            and ``"pose"`` (``{"data": tensor, "obj": pose-with-header}``) entries.
        output_dir: directory for visualizations; created if it does not exist.
        gen_k: maximum number of items to process when *subset* is None.
        vis: if True, visualize each prediction via ``visualize_seq``;
            otherwise build and collect ``Pose`` objects.
        subset: optional collection of datum ids; when given, only matching
            items are processed (and *gen_k* is ignored).
        fps: frame rate recorded in the produced pose body. Defaults to 25,
            matching the previous hard-coded value.

    Returns:
        list of predicted ``Pose`` objects (empty when *vis* is True, since
        visualized predictions are written to *output_dir* instead).
    """
    os.makedirs(output_dir, exist_ok=True)
    # Header is shared by all items; taken from the first datum.
    pose_header = dataset[0]["pose"]["obj"].header
    preds = []

    model.eval()
    with torch.no_grad():
        for i, datum in enumerate(dataset):
            if subset is not None and datum["id"] not in subset:
                continue
            if i >= gen_k and subset is None:
                break
            first_pose = datum["pose"]["data"][0]
            seq_iter = model.forward(text=datum["text"], first_pose=first_pose.cuda())

            # Exhaust the iterative-refinement iterator; only the final
            # sequence is kept. Guard against num_steps == 0, which would
            # otherwise leave `seq` unbound (or stale from a prior datum).
            seq = None
            for _ in range(model.num_steps):
                seq = next(seq_iter)
            if seq is None:
                continue

            if vis:
                visualize_seq(seq, pose_header, output_dir, datum["id"])
            else:
                # Add a "people" axis: (frames, joints, dims) -> (frames, 1, joints, dims).
                data = torch.unsqueeze(seq, 1).cpu()
                # Predictions carry no per-joint confidence, so use all ones.
                conf = torch.ones_like(data[:, :, :, 0])
                pose_body = NumPyPoseBody(fps, data.numpy(), conf.numpy())
                predicted_pose = Pose(pose_header, pose_body)
                pose_hide_legs(predicted_pose)
                preds.append(predicted_pose)
    return preds

These are the predictions produced by Ham2Pose.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions