
Commit 8c14c55

There is an issue in getting the max sequence length
It was defaulting to too small a number. This will fix some things for now.
1 parent eee8abc commit 8c14c55

File tree

1 file changed: +4 -3 lines changed


torchchat/usages/openai_api.py

Lines changed: 4 additions & 3 deletions
@@ -289,8 +289,8 @@ def __init__(self, *args, **kwargs):
                 else self.model.text_transformer_args.max_seq_length
             )
         except:
-            # can not find max_seq_length in model config, use default value
-            self.max_seq_length = 128
+            self.max_seq_length = 2048
+            print(f"can not find max_seq_length in model config, use default value: {self.max_seq_length}")
         # The System fingerprint is a unique identifier for the model and its configuration.
         self.system_fingerprint = (
             f"{self.builder_args.device}_{self.builder_args.precision}"
@@ -321,10 +321,11 @@ def _openai_messages_to_torchtune_messages(
                         base64_decoded = base64.b64decode(
                             content_dict["image_url"].split(";base64,")[1]
                         )
+                        image = Image.open(BytesIO(base64_decoded))
                         converted_content.append(
                             {
                                 "type": "image",
-                                "content": Image.open(BytesIO(base64_decoded)),
+                                "content": image,
                             }
                         )
             torchtune_messages.append(
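
The second hunk is a small refactor that hoists the PIL decode into a named variable. For reference, a minimal sketch of the same base64 data-URL decoding pattern; the helper name and sample payload are made up for illustration:

# Sketch of the decode pattern used above: strip the ";base64," data-URL
# prefix, base64-decode the payload, and open the bytes as a PIL image.
import base64
from io import BytesIO

from PIL import Image


def decode_image_data_url(image_url: str) -> Image.Image:
    # image_url looks like "data:image/png;base64,iVBORw0KGgo..."
    base64_payload = image_url.split(";base64,")[1]
    base64_decoded = base64.b64decode(base64_payload)
    return Image.open(BytesIO(base64_decoded))


# Usage example: round-trip a tiny generated image through a data URL.
buffer = BytesIO()
Image.new("RGB", (4, 4), color="red").save(buffer, format="PNG")
data_url = "data:image/png;base64," + base64.b64encode(buffer.getvalue()).decode()
print(decode_image_data_url(data_url).size)  # (4, 4)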
