Skip to content

Commit

Permalink
fix inference_test (NVlabs#108)
Browse files — browse the repository at this point in the history
  • Loading branch information
yaolug authored Jun 26, 2024
1 parent e723424 commit 2dd6199
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion llava/eval/run_vila.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def eval_model(args):
assert osp.exists(args.video_file), "video file not found"
video_file = args.video_file
from llava.mm_utils import opencv_extract_frames
images = opencv_extract_frames(video_file, args.num_video_frames)
images, num_frames = opencv_extract_frames(video_file, args.num_video_frames)

model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, model_name, args.model_base)
Expand Down
4 changes: 2 additions & 2 deletions tests/bash_tests/inference_tests.sh
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# TODO(ligeng): replace with the model with finalized 7B model.
python llava/eval/run_vila.py \
--model-path Efficient-Large-Model/CI-new-format-llama7b-siglip \
--model-path Efficient-Large-Model/VILA1.5-3b \
--query "Please describe the image" \
--image-file inference_test/test_data/caption_meat.jpeg

python llava/eval/run_vila.py \
--model-path Efficient-Large-Model/CI-new-format-llama7b-siglip \
--model-path Efficient-Large-Model/VILA1.5-3b \
--query "Please describe the video" \
--video-file https://huggingface.co/datasets/Efficient-Large-Model/VILA-inference-demos/resolve/main/OAI-sora-tokyo-walk.mp4

0 comments on commit 2dd6199

Please sign in to comment.