Mirror of https://gitee.com/dify_ai/dify.git (synced 2024-11-30 10:18:13 +08:00)
fix typo recale to recalc (#2670)

commit 34387ec0f1 (parent 83a6b0c626)
@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 input=query
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
 
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
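For context, the comment fixed in this hunk marks the step where the CoT runner trims the model's max-token budget to fit the assembled prompt before invoking the LLM. A minimal sketch of that kind of recalculation follows; the context-window constant and the character-based token heuristic are assumptions for illustration, not dify's actual recalc_llm_max_tokens implementation.

def recalc_llm_max_tokens_sketch(model_config: dict, prompt_messages: list) -> None:
    # Assumed context window size for the sketch; a real implementation would
    # read this from the provider/model configuration.
    MODEL_CONTEXT_LIMIT = 4096

    # Rough token estimate: ~4 characters per token (heuristic, not a tokenizer).
    used_tokens = sum(len(str(m)) for m in prompt_messages) // 4

    # Clamp the completion budget so prompt + completion fits the context window.
    requested = model_config.get("max_tokens", 512)
    model_config["max_tokens"] = max(16, min(requested, MODEL_CONTEXT_LIMIT - used_tokens))

A real implementation would use the model's tokenizer and the provider's reported context size rather than a fixed constant and a length heuristic.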
@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 messages_ids=message_file_ids
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
 
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
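In this second hunk the function-call runner types chunks as Union[Generator[LLMResultChunk, None, None], LLMResult], since invoke_llm can run in streaming or blocking mode. A hedged sketch of consuming either shape, using simplified stand-in classes rather than dify's actual model entities:

from typing import Generator, Union

# Simplified stand-ins for dify's LLMResultChunk / LLMResult, for illustration only.
class LLMResultChunk:
    def __init__(self, text: str) -> None:
        self.text = text

class LLMResult:
    def __init__(self, text: str) -> None:
        self.text = text

def collect_output(chunks: Union[Generator[LLMResultChunk, None, None], LLMResult]) -> str:
    # Blocking mode: a single complete result object is returned at once.
    if isinstance(chunks, LLMResult):
        return chunks.text
    # Streaming mode: join chunk texts as they arrive from the generator.
    return "".join(chunk.text for chunk in chunks)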