 
 Note that in each inference, we should be able to pick the corresponding prompt and checkpoint folder
 """
-from llava.action.utils import generate_label_map
+from llavaction.action.utils import generate_label_map
 
 from pathlib import Path
-from llava.action.utils import AvionMultiChoiceGenerator as ActionMultiChoiceGenerator
-from llava.action.llava_inference import llava_inference
+from llavaction.action.utils import AvionMultiChoiceGenerator as ActionMultiChoiceGenerator
+from llavaction.action.llava_inference import llava_inference
 import json
 import cv2
 # root = '/data/EK100/EK100_320p_15sec_30fps_libx264'
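The hunk above only changes the package prefix from `llava` to `llavaction`; the imported names stay the same. A minimal smoke test, assuming the renamed package is installed in the current environment, is simply to re-run the new imports verbatim:

```python
# Minimal import smoke test for the renamed package.
# Assumption: `llavaction` is installed in the active environment.
from llavaction.action.utils import generate_label_map, AvionMultiChoiceGenerator
from llavaction.action.llava_inference import llava_inference

print(generate_label_map, AvionMultiChoiceGenerator, llava_inference)  # three importable names
```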
@@ -40,7 +40,7 @@ def visualize_with_random(n_samples, offset = 0, question_type = 'mc_'):
     """
     Here we should test gpt-4o, gpt-4o-mini with different prompts
     """
-    from llava.action.chatgpt_utils import GPTInferenceAnnotator
+    from llavaction.action.chatgpt_utils import GPTInferenceAnnotator
     inferencer = GPTInferenceAnnotator(gpt_model,
                                        root,
                                        annotation_file,
@@ -60,7 +60,7 @@ def visualize_with_gpt_with_tim(n_samples, offset = 0, question_type = 'mc_'):
     """
     Here we should test gpt-4o, gpt-4o-mini with different prompts
     """
-    from llava.action.chatgpt_utils import GPTInferenceAnnotator
+    from llavaction.action.chatgpt_utils import GPTInferenceAnnotator
     inferencer = GPTInferenceAnnotator(gpt_model,
                                        root,
                                        annotation_file,
@@ -81,7 +81,7 @@ def visualize_with_gpt_with_avion(n_samples, offset = 0, question_type = 'mc_'):
     """
     Here we should test gpt-4o, gpt-4o-mini with different prompts
     """
-    from llava.action.chatgpt_utils import GPTInferenceAnnotator
+    from llavaction.action.chatgpt_utils import GPTInferenceAnnotator
     inferencer = GPTInferenceAnnotator(gpt_model,
                                        root,
                                        annotation_file,
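Each of the three hunks above makes the same change: the `GPTInferenceAnnotator` import moves from `llava.action.chatgpt_utils` to `llavaction.action.chatgpt_utils`, and the constructor call itself is untouched. Only the first three positional arguments (`gpt_model`, `root`, `annotation_file`) are visible in this diff; the remaining arguments are elided there, so the following is a hedged sketch of the shared pattern rather than the file's actual code:

```python
# Shared pattern from the three hunks above. Only the first three positional
# arguments appear in the diff; any extra options the real call sites pass
# are forwarded unchanged here. This is a hypothetical wrapper, not repo code.
from llavaction.action.chatgpt_utils import GPTInferenceAnnotator

def build_gpt_inferencer(gpt_model, root, annotation_file, **elided_kwargs):
    return GPTInferenceAnnotator(gpt_model, root, annotation_file, **elided_kwargs)
```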
@@ -100,7 +100,7 @@ def visualize_with_gpt_with_avion(n_samples, offset = 0, question_type = 'mc_'):
 
 def search_option_data_by_uid(uid, anno_file, gen_type = 'tim'):
     import csv
-    from llava.action.dataset import datetime2sec
+    from llavaction.action.dataset import datetime2sec
     csv_reader = csv.reader(open(anno_file, 'r'))
     _ = next(csv_reader)  # skip the header
     query_vid_path = '_'.join(uid.split('_')[:2]).replace('-', '/')
@@ -166,7 +166,7 @@ def save_visualization(vis_folder, frames, uid):
     video_out.release()
 
 def visualize_with_uid(data_root, uid, out_folder):
-    from llava.action.utils import avion_video_loader
+    from llavaction.action.utils import avion_video_loader
 
     vid_path = '_'.join(uid.split('_')[:2]).replace('-', '/')
     start_timestamp, end_timestamp = uid.split('_')[2:]
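`search_option_data_by_uid` and `visualize_with_uid` both decode the clip path (and, in the second case, the timestamps) from the `uid` string with the same expressions. A small illustration follows; the concrete uid is a hypothetical example of the `<participant>-<video>_<start>_<end>` convention inferred from the parsing code, not a value taken from the dataset:

```python
# Hypothetical uid; the format is inferred from the parsing code above.
uid = "P01-P01_01_123.45_130.00"

vid_path = '_'.join(uid.split('_')[:2]).replace('-', '/')    # -> "P01/P01_01"
start_timestamp, end_timestamp = uid.split('_')[2:]          # -> "123.45", "130.00"

print(vid_path, start_timestamp, end_timestamp)
```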
@@ -193,12 +193,12 @@ def visualize_with_uid(data_root, uid, out_folder):
 def visualize_with_llava(pretrained_path, uid, question_type, gen_type):
     """
     """
-    from llava.action.ek_eval import prepare_llava
-    from llava.action.dataset import VideoMultiChoiceDataset
+    from llavaction.action.ek_eval import prepare_llava
+    from llavaction.action.dataset import VideoMultiChoiceDataset
 
     import torch
 
-    from llava.action.utils import avion_video_loader
+    from llavaction.action.utils import avion_video_loader
     val_metadata = '/data/shaokai/epic-kitchens-100-annotations/EPIC_100_validation.csv'
 
     gpu_val_transform_ls = []
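For completeness, a hedged sketch of how `visualize_with_llava` might be invoked from this script; the checkpoint path and uid are placeholders, and the `question_type` and `gen_type` values are borrowed from the defaults visible in the signatures elsewhere in this diff:

```python
# Hypothetical invocation; all argument values are placeholders.
if __name__ == "__main__":
    visualize_with_llava(
        pretrained_path="/path/to/checkpoint",   # placeholder checkpoint folder
        uid="P01-P01_01_123.45_130.00",          # same assumed uid convention as above
        question_type="mc_",                     # default used by the visualize_* helpers
        gen_type="tim",                          # default used by search_option_data_by_uid
    )
```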