import logging

import openai

from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.providers.hosted import hosted_config, hosted_model_providers
from models.provider import ProviderType


def check_moderation(model_provider: BaseModelProvider, text: str) -> bool:
    """Return True if `text` passes the hosted content moderation check,
    or False if any part of it is flagged."""
    if hosted_config.moderation.enabled is True and hosted_model_providers.openai:
        # Moderate only text that is served through the hosted (system)
        # providers listed in the moderation configuration.
        if model_provider.provider.provider_type == ProviderType.SYSTEM.value \
                and model_provider.provider_name in hosted_config.moderation.providers:
            # Split the text into chunks of 2,000 characters each.
            length = 2000
            text_chunks = [text[i:i + length] for i in range(0, len(text), length)]

            # The moderation endpoint accepts a batch of inputs, so send at
            # most 32 chunks per request.
            max_text_chunks = 32
            chunks = [text_chunks[i:i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)]
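            # For example, a 70,000-character text becomes 35 chunks of 2,000
            # characters, which are then sent as two requests: one with 32
            # inputs and one with 3.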
            for text_chunk in chunks:
                try:
                    moderation_result = openai.Moderation.create(
                        input=text_chunk,
                        api_key=hosted_model_providers.openai.api_key
                    )
                except Exception as ex:
                    # Most failures here are rate limits on the hosted
                    # moderation endpoint; surface them to the caller as a
                    # bad-request error.
                    logging.exception(ex)
                    raise LLMBadRequestError('Rate limit exceeded, please try again later.')

                # If any chunk in this batch is flagged, the whole text fails.
                for result in moderation_result.results:
                    if result['flagged'] is True:
                        return False

    return True
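# Usage sketch (illustrative only): `load_model_provider` is a hypothetical
# stand-in for however the caller obtains its BaseModelProvider instance;
# only `check_moderation` itself is defined in this module.
#
#     model_provider = load_model_provider(tenant_id, 'openai')
#     if not check_moderation(model_provider, user_input):
#         raise LLMBadRequestError('Input rejected by content moderation.')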