From b85ae146a7fa42542c639c6a94692972ac7e36ca Mon Sep 17 00:00:00 2001
From: rerorero
Date: Fri, 14 Jun 2024 03:32:09 +0900
Subject: [PATCH] fix: JSON mode with an image doesn't work for Gemini (#5169)

---
 .../model_providers/__base/large_language_model.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py
index f14862595..ef633c61c 100644
--- a/api/core/model_runtime/model_providers/__base/large_language_model.py
+++ b/api/core/model_runtime/model_providers/__base/large_language_model.py
@@ -14,6 +14,7 @@ from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResu
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
     PromptMessage,
+    PromptMessageContentType,
     PromptMessageTool,
     SystemPromptMessage,
     UserPromptMessage,
@@ -205,8 +206,14 @@ if you are not sure about the structure.
             ))
 
         if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage):
-            # add ```JSON\n to the last message
-            prompt_messages[-1].content += f"\n```{code_block}\n"
+            # add ```JSON\n to the last text message
+            if isinstance(prompt_messages[-1].content, str):
+                prompt_messages[-1].content += f"\n```{code_block}\n"
+            elif isinstance(prompt_messages[-1].content, list):
+                for i in range(len(prompt_messages[-1].content) - 1, -1, -1):
+                    if prompt_messages[-1].content[i].type == PromptMessageContentType.TEXT:
+                        prompt_messages[-1].content[i].data += f"\n```{code_block}\n"
+                        break
         else:
             # append a user message
             prompt_messages.append(UserPromptMessage(
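
Note (not part of the patch): a minimal sketch of the behavior the new branch is aiming for when the last user message is multimodal, i.e. its content is a list of parts rather than a plain string. It assumes the TextPromptMessageContent and ImagePromptMessageContent classes from core.model_runtime.entities.message_entities accept a data keyword, as in recent Dify versions; the sketch is illustrative, not the patched method itself.

    from core.model_runtime.entities.message_entities import (
        ImagePromptMessageContent,
        PromptMessageContentType,
        TextPromptMessageContent,
        UserPromptMessage,
    )

    code_block = "JSON"

    # A multimodal user message: a text part followed by an image part.
    message = UserPromptMessage(content=[
        TextPromptMessageContent(data="Describe this picture and answer in JSON."),
        ImagePromptMessageContent(data="data:image/png;base64,..."),
    ])

    # Mirror the patched branch: walk the parts from the end and append the
    # JSON code-block hint to the last TEXT part, leaving the image untouched.
    for part in reversed(message.content):
        if part.type == PromptMessageContentType.TEXT:
            part.data += f"\n```{code_block}\n"
            break

Before the fix, the wrapper appended the hint with `content += ...`, which raises for list-typed content; with the change above, JSON mode keeps working when an image is attached.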