fix generation use_cache issue for mpt
i-gao committed Nov 1, 2023
1 parent 17cd546 commit aa995e3
Showing 1 changed file with 2 additions and 1 deletion.
open_flamingo/src/vlm.py: 3 changes (2 additions & 1 deletion)
@@ -277,6 +277,7 @@ def generate(
             **new_inputs,
             past_key_values=past_key_values,
             num_beams=num_beams,
+            use_cache=True,
             **kwargs,
         )
         self._post_forward_hook()
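
The hunk above threads use_cache=True into the wrapped language model's generate call. The commit title suggests that the MPT backbone otherwise defaults to use_cache=False and discards the past_key_values that the caller precomputes, so generation recomputes the full prefix at every step. A toy sketch of that presumed failure mode; ToyBackbone and generate_step are illustrative names, not the MPT or open_flamingo API:

# Toy sketch of the presumed use_cache failure mode; not the real MPT or open_flamingo code.
import torch

class ToyBackbone:
    def generate_step(self, input_ids, past_key_values=None, use_cache=False):
        if not use_cache:
            # Without the flag, the supplied cache is silently discarded and
            # every step re-encodes the full sequence.
            past_key_values = None
        seq = input_ids if past_key_values is None else input_ids[:, -1:]
        # ... attention over `seq` (plus the cache, if any) would run here ...
        return seq.shape[1], past_key_values

model = ToyBackbone()
input_ids = torch.ones(1, 10, dtype=torch.long)
dummy_cache = object()  # stands in for real key/value tensors
print(model.generate_step(input_ids, dummy_cache, use_cache=False))  # (10, None): cache dropped
print(model.generate_step(input_ids, dummy_cache, use_cache=True))   # (1, <cache>): only the new token is processed
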
@@ -420,7 +421,7 @@ def _prepare_inputs_for_forward(
                     v.repeat_interleave(num_beams, dim=0)
                 )
                 for k, v in past_key_values
-            ]
+            ] if past_key_values is not None else None
         return {
             "input_ids": lang_x,
             "attention_mask": attention_mask,
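
The second hunk guards the beam expansion of the cache so that a None past_key_values (e.g. the first generation step, before any cache exists) passes through unchanged instead of failing when the comprehension tries to iterate it. A minimal standalone sketch of the same pattern; expand_past_for_beams and the fake cache shapes are illustrative, not the open_flamingo helper:

# Standalone sketch of the None-guarded beam expansion; names and shapes are illustrative.
from typing import List, Optional, Tuple
import torch

def expand_past_for_beams(
    past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]],
    num_beams: int,
) -> Optional[List[Tuple[torch.Tensor, torch.Tensor]]]:
    """Repeat each layer's cached key/value tensors along the batch dimension
    so the cache lines up with beam-expanded inputs; pass None straight through."""
    if past_key_values is None:
        return None
    return [
        (
            k.repeat_interleave(num_beams, dim=0),
            v.repeat_interleave(num_beams, dim=0),
        )
        for k, v in past_key_values
    ]

# One fake layer: batch 2, 8 heads, 5 cached positions, head dim 64.
fake_cache = [(torch.randn(2, 8, 5, 64), torch.randn(2, 8, 5, 64))]
print(expand_past_for_beams(fake_cache, num_beams=3)[0][0].shape)  # torch.Size([6, 8, 5, 64])
print(expand_past_for_beams(None, num_beams=3))                    # None
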

0 comments on commit aa995e3
