From 9e2cbbb45653e7dd4419528adcaecaf3f1718972 Mon Sep 17 00:00:00 2001
From: Claude
Date: Mon, 12 Jan 2026 10:43:59 +0000
Subject: [PATCH 1/2] feat: implement OpenAI LLM client with full API support

Implements OpenAiLlmClient with complete functionality for the OpenAI API:

- chat(): Synchronous chat completion with response parsing for
  choices[0].message.content, finish_reason, model, and usage tokens
- streamChat(): Server-Sent Events (SSE) based streaming with proper
  "data: " prefix handling and "[DONE]" marker detection
- isAvailable(): API availability check using the /models endpoint with
  API key validation

Also adds comprehensive unit tests for both the OpenAI and Ollama clients:

- OpenAiLlmClientTest: Tests for getName, isAvailable (various cases),
  buildRequestBody, convertMessage, and HTTP client initialization
- OllamaLlmClientTest: Matching test coverage for the Ollama client

The implementation follows the same patterns as OllamaLlmClient for
consistency, using OkHttpClient and Jackson ObjectMapper.
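
For reviewers, a minimal caller-side sketch (illustrative only, not part of
this patch: the client is normally obtained from the DI container rather than
constructed directly, and the LlmStreamCallback method signatures below are
inferred from the calls made in streamChat(); exact declarations may differ):

    final OpenAiLlmClient client = new OpenAiLlmClient();
    client.init();
    if (client.isAvailable()) {
        // Synchronous completion
        final LlmChatResponse res = client.chat(new LlmChatRequest().addUserMessage("Hello"));
        System.out.println(res.getContent());
        // Streaming completion; onChunk receives (content, done)
        client.streamChat(new LlmChatRequest().addUserMessage("Hello"), new LlmStreamCallback() {
            @Override
            public void onChunk(final String content, final boolean done) {
                System.out.print(content);
            }

            @Override
            public void onError(final Exception e) {
                e.printStackTrace();
            }
        });
    }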
---
 .../fess/llm/openai/OpenAiLlmClient.java      | 306 ++++++++++++++++-
 .../fess/llm/ollama/OllamaLlmClientTest.java  | 297 ++++++++++++++++
 .../fess/llm/openai/OpenAiLlmClientTest.java  | 316 ++++++++++++++++++
 3 files changed, 911 insertions(+), 8 deletions(-)
 create mode 100644 src/test/java/org/codelibs/fess/llm/ollama/OllamaLlmClientTest.java
 create mode 100644 src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java

diff --git a/src/main/java/org/codelibs/fess/llm/openai/OpenAiLlmClient.java b/src/main/java/org/codelibs/fess/llm/openai/OpenAiLlmClient.java
index 6e4bd9a27..c6370ae51 100644
--- a/src/main/java/org/codelibs/fess/llm/openai/OpenAiLlmClient.java
+++ b/src/main/java/org/codelibs/fess/llm/openai/OpenAiLlmClient.java
@@ -15,6 +15,16 @@
  */
 package org.codelibs.fess.llm.openai;
 
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.codelibs.core.lang.StringUtil;
@@ -22,19 +32,39 @@ import org.codelibs.fess.llm.LlmChatResponse;
 import org.codelibs.fess.llm.LlmClient;
 import org.codelibs.fess.llm.LlmException;
+import org.codelibs.fess.llm.LlmMessage;
 import org.codelibs.fess.llm.LlmStreamCallback;
 import org.codelibs.fess.util.ComponentUtil;
 
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import okhttp3.MediaType;
+import okhttp3.OkHttpClient;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+import okhttp3.Response;
+
 /**
  * LLM client implementation for OpenAI API.
  *
+ * OpenAI provides cloud-based LLM services including GPT-4 and other models.
+ * This client supports both synchronous and streaming chat completions.
+ *
  * @author FessProject
  * @see OpenAI API Reference
  */
 public class OpenAiLlmClient implements LlmClient {
 
     private static final Logger logger = LogManager.getLogger(OpenAiLlmClient.class);
 
+    private static final MediaType JSON_MEDIA_TYPE = MediaType.parse("application/json; charset=utf-8");
+
     private static final String NAME = "openai";
 
+    private static final String SSE_DATA_PREFIX = "data: ";
+    private static final String SSE_DONE_MARKER = "[DONE]";
+
+    private OkHttpClient httpClient;
+    private final ObjectMapper objectMapper = new ObjectMapper();
 
     /**
      * Default constructor.
@@ -44,11 +74,14 @@ public OpenAiLlmClient() {
     }
 
     /**
-     * Initializes the client.
+     * Initializes the HTTP client.
      */
     public void init() {
+        final int timeout = getTimeout();
+        httpClient = new OkHttpClient.Builder().connectTimeout(timeout, TimeUnit.MILLISECONDS)
+                .readTimeout(timeout, TimeUnit.MILLISECONDS).writeTimeout(timeout, TimeUnit.MILLISECONDS).build();
         if (logger.isDebugEnabled()) {
-            logger.debug("Initialized OpenAiLlmClient");
+            logger.debug("Initialized OpenAiLlmClient with timeout: {}ms", timeout);
         }
     }
 
@@ -59,20 +92,259 @@ public String getName() {
 
     @Override
     public boolean isAvailable() {
-        // Not yet implemented - return false until chat methods are properly implemented
-        return false;
+        final String apiKey = getApiKey();
+        if (StringUtil.isBlank(apiKey)) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("OpenAI is not available. apiKey is blank");
+            }
+            return false;
+        }
+        final String apiUrl = getApiUrl();
+        if (StringUtil.isBlank(apiUrl)) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("OpenAI is not available. apiUrl is blank");
+            }
+            return false;
+        }
+        try {
+            final Request request = new Request.Builder().url(apiUrl + "/models").get()
+                    .addHeader("Authorization", "Bearer " + apiKey).build();
+            try (Response response = getHttpClient().newCall(request).execute()) {
+                final boolean available = response.isSuccessful();
+                if (logger.isDebugEnabled()) {
+                    logger.debug("OpenAI availability check. url={}, statusCode={}, available={}", apiUrl, response.code(), available);
+                }
+                return available;
+            }
+        } catch (final Exception e) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("OpenAI is not available. url={}, error={}", apiUrl, e.getMessage());
+            }
+            return false;
+        }
     }
 
     @Override
     public LlmChatResponse chat(final LlmChatRequest request) {
-        // TODO: Implement OpenAI chat
-        throw new LlmException("OpenAI client not yet implemented");
+        final String url = getApiUrl() + "/chat/completions";
+        final Map<String, Object> requestBody = buildRequestBody(request, false);
+
+        if (logger.isDebugEnabled()) {
+            logger.debug("Sending chat request to OpenAI. url={}, model={}, messageCount={}", url, requestBody.get("model"),
+                    request.getMessages().size());
+        }
+
+        try {
+            final String json = objectMapper.writeValueAsString(requestBody);
+            final Request httpRequest = new Request.Builder().url(url).post(RequestBody.create(json, JSON_MEDIA_TYPE))
+                    .addHeader("Authorization", "Bearer " + getApiKey()).addHeader("Content-Type", "application/json").build();
+
+            try (Response response = getHttpClient().newCall(httpRequest).execute()) {
+                if (!response.isSuccessful()) {
+                    logger.warn("OpenAI API error. url={}, statusCode={}, message={}", url, response.code(), response.message());
+                    throw new LlmException("OpenAI API error: " + response.code() + " " + response.message());
+                }
+
+                final String responseBody = response.body() != null ? response.body().string() : "";
+                final JsonNode jsonNode = objectMapper.readTree(responseBody);
+
+                final LlmChatResponse chatResponse = new LlmChatResponse();
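+                // Illustrative (abridged) non-streaming response shape consumed below, e.g.:
+                //   {"model":"gpt-4o",
+                //    "choices":[{"message":{"role":"assistant","content":"..."},"finish_reason":"stop"}],
+                //    "usage":{"prompt_tokens":9,"completion_tokens":12,"total_tokens":21}}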
+                // Parse choices[0].message.content
+                if (jsonNode.has("choices") && jsonNode.get("choices").isArray() && jsonNode.get("choices").size() > 0) {
+                    final JsonNode firstChoice = jsonNode.get("choices").get(0);
+                    if (firstChoice.has("message") && firstChoice.get("message").has("content")) {
+                        chatResponse.setContent(firstChoice.get("message").get("content").asText());
+                    }
+                    if (firstChoice.has("finish_reason") && !firstChoice.get("finish_reason").isNull()) {
+                        chatResponse.setFinishReason(firstChoice.get("finish_reason").asText());
+                    }
+                }
+                // Parse model
+                if (jsonNode.has("model")) {
+                    chatResponse.setModel(jsonNode.get("model").asText());
+                }
+                // Parse usage
+                if (jsonNode.has("usage")) {
+                    final JsonNode usage = jsonNode.get("usage");
+                    if (usage.has("prompt_tokens")) {
+                        chatResponse.setPromptTokens(usage.get("prompt_tokens").asInt());
+                    }
+                    if (usage.has("completion_tokens")) {
+                        chatResponse.setCompletionTokens(usage.get("completion_tokens").asInt());
+                    }
+                    if (usage.has("total_tokens")) {
+                        chatResponse.setTotalTokens(usage.get("total_tokens").asInt());
+                    }
+                }
+
+                if (logger.isDebugEnabled()) {
+                    logger.debug(
+                            "Received chat response from OpenAI. model={}, promptTokens={}, completionTokens={}, totalTokens={}, contentLength={}",
+                            chatResponse.getModel(), chatResponse.getPromptTokens(), chatResponse.getCompletionTokens(),
+                            chatResponse.getTotalTokens(), chatResponse.getContent() != null ? chatResponse.getContent().length() : 0);
+                }
+
+                return chatResponse;
+            }
+        } catch (final LlmException e) {
+            throw e;
+        } catch (final Exception e) {
+            logger.warn("Failed to call OpenAI API. url={}, error={}", url, e.getMessage(), e);
+            throw new LlmException("Failed to call OpenAI API", e);
+        }
     }
 
     @Override
     public void streamChat(final LlmChatRequest request, final LlmStreamCallback callback) {
-        // TODO: Implement OpenAI streaming chat
-        throw new LlmException("OpenAI streaming not yet implemented");
+        final String url = getApiUrl() + "/chat/completions";
+        final Map<String, Object> requestBody = buildRequestBody(request, true);
+
+        if (logger.isDebugEnabled()) {
+            logger.debug("Starting streaming chat request to OpenAI. url={}, model={}, messageCount={}", url, requestBody.get("model"),
+                    request.getMessages().size());
+        }
+
+        try {
+            final String json = objectMapper.writeValueAsString(requestBody);
+            final Request httpRequest = new Request.Builder().url(url).post(RequestBody.create(json, JSON_MEDIA_TYPE))
+                    .addHeader("Authorization", "Bearer " + getApiKey()).addHeader("Content-Type", "application/json").build();
+
+            try (Response response = getHttpClient().newCall(httpRequest).execute()) {
+                if (!response.isSuccessful()) {
+                    logger.warn("OpenAI streaming API error. url={}, statusCode={}, message={}", url, response.code(), response.message());
+                    throw new LlmException("OpenAI API error: " + response.code() + " " + response.message());
+                }
+
+                if (response.body() == null) {
+                    logger.warn("Empty response from OpenAI streaming API. url={}", url);
+                    throw new LlmException("Empty response from OpenAI");
+                }
+
+                int chunkCount = 0;
+                try (BufferedReader reader =
+                        new BufferedReader(new InputStreamReader(response.body().byteStream(), StandardCharsets.UTF_8))) {
+                    String line;
+                    while ((line = reader.readLine()) != null) {
+                        if (StringUtil.isBlank(line)) {
+                            continue;
+                        }
+
+                        // OpenAI SSE format: "data: {json}" or "data: [DONE]"
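+                        // Illustrative frames, e.g.:
+                        //   data: {"choices":[{"delta":{"content":"Hel"},"finish_reason":null}]}
+                        //   data: {"choices":[{"delta":{},"finish_reason":"stop"}]}
+                        //   data: [DONE]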
+                        if (!line.startsWith(SSE_DATA_PREFIX)) {
+                            continue;
+                        }
+
+                        final String data = line.substring(SSE_DATA_PREFIX.length()).trim();
+                        if (SSE_DONE_MARKER.equals(data)) {
+                            callback.onChunk("", true);
+                            break;
+                        }
+
+                        try {
+                            final JsonNode jsonNode = objectMapper.readTree(data);
+                            // Parse choices[0].delta.content
+                            if (jsonNode.has("choices") && jsonNode.get("choices").isArray() && jsonNode.get("choices").size() > 0) {
+                                final JsonNode firstChoice = jsonNode.get("choices").get(0);
+                                final boolean done = firstChoice.has("finish_reason") && !firstChoice.get("finish_reason").isNull()
+                                        && !"null".equals(firstChoice.get("finish_reason").asText());
+
+                                if (firstChoice.has("delta") && firstChoice.get("delta").has("content")) {
+                                    final String content = firstChoice.get("delta").get("content").asText();
+                                    callback.onChunk(content, done);
+                                    chunkCount++;
+                                } else if (done) {
+                                    callback.onChunk("", true);
+                                }
+
+                                if (done) {
+                                    break;
+                                }
+                            }
+                        } catch (final JsonProcessingException e) {
+                            logger.warn("Failed to parse OpenAI streaming response. line={}", line, e);
+                        }
+                    }
+                }
+
+                if (logger.isDebugEnabled()) {
+                    logger.debug("Completed streaming chat from OpenAI. url={}, chunkCount={}", url, chunkCount);
+                }
+            }
+        } catch (final LlmException e) {
+            callback.onError(e);
+            throw e;
+        } catch (final IOException e) {
+            logger.warn("Failed to stream from OpenAI API. url={}, error={}", url, e.getMessage(), e);
+            final LlmException llmException = new LlmException("Failed to stream from OpenAI API", e);
+            callback.onError(llmException);
+            throw llmException;
+        }
+    }
+
+    /**
+     * Builds the request body for the OpenAI API.
+     *
+     * @param request the chat request
+     * @param stream whether to enable streaming
+     * @return the request body as a map
+     */
+    protected Map<String, Object> buildRequestBody(final LlmChatRequest request, final boolean stream) {
+        final Map<String, Object> body = new HashMap<>();
+
+        // Model
+        String model = request.getModel();
+        if (StringUtil.isBlank(model)) {
+            model = getModel();
+        }
+        body.put("model", model);
+
+        // Messages
+        final List<Map<String, Object>> messages = request.getMessages().stream().map(this::convertMessage).collect(Collectors.toList());
+        body.put("messages", messages);
+
+        // Stream
+        body.put("stream", stream);
+
+        // Temperature (top-level for OpenAI)
+        if (request.getTemperature() != null) {
+            body.put("temperature", request.getTemperature());
+        } else {
+            body.put("temperature", getTemperature());
+        }
+
+        // Max tokens (top-level for OpenAI)
+        if (request.getMaxTokens() != null) {
+            body.put("max_tokens", request.getMaxTokens());
+        } else {
+            body.put("max_tokens", getMaxTokens());
+        }
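+
+        // With the defaults exercised in the tests, the serialized body looks like,
+        // e.g. (illustrative):
+        //   {"model":"gpt-4o","messages":[{"role":"user","content":"Hello"}],
+        //    "stream":false,"temperature":0.7,"max_tokens":4096}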
+        return body;
+    }
+
+    /**
+     * Converts an LlmMessage to a map for the API request.
+     *
+     * @param message the message to convert
+     * @return the message as a map
+     */
+    protected Map<String, Object> convertMessage(final LlmMessage message) {
+        final Map<String, Object> map = new HashMap<>();
+        map.put("role", message.getRole());
+        map.put("content", message.getContent());
+        return map;
+    }
+
+    /**
+     * Gets the HTTP client, initializing it if necessary.
+     *
+     * @return the HTTP client
+     */
+    protected OkHttpClient getHttpClient() {
+        if (httpClient == null) {
+            init();
+        }
+        return httpClient;
     }
 
     /**
@@ -110,4 +382,22 @@ protected String getModel() {
     protected int getTimeout() {
         return ComponentUtil.getFessConfig().getRagLlmOpenaiTimeoutAsInteger();
     }
+
+    /**
+     * Gets the temperature parameter.
+     *
+     * @return the temperature
+     */
+    protected double getTemperature() {
+        return ComponentUtil.getFessConfig().getRagChatTemperatureAsDecimal().doubleValue();
+    }
+
+    /**
+     * Gets the maximum tokens for the response.
+     *
+     * @return the maximum tokens
+     */
+    protected int getMaxTokens() {
+        return ComponentUtil.getFessConfig().getRagChatMaxTokensAsInteger();
+    }
 }
diff --git a/src/test/java/org/codelibs/fess/llm/ollama/OllamaLlmClientTest.java b/src/test/java/org/codelibs/fess/llm/ollama/OllamaLlmClientTest.java
new file mode 100644
index 000000000..5895b4fb2
--- /dev/null
+++ b/src/test/java/org/codelibs/fess/llm/ollama/OllamaLlmClientTest.java
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2012-2025 CodeLibs Project and the Others.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.codelibs.fess.llm.ollama;
+
+import java.util.List;
+import java.util.Map;
+
+import org.codelibs.fess.llm.LlmChatRequest;
+import org.codelibs.fess.llm.LlmMessage;
+import org.codelibs.fess.unit.UnitFessTestCase;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
+
+public class OllamaLlmClientTest extends UnitFessTestCase {
+
+    private TestableOllamaLlmClient client;
+
+    @BeforeEach
+    @Override
+    protected void setUp(TestInfo testInfo) throws Exception {
+        super.setUp(testInfo);
+        client = new TestableOllamaLlmClient();
+    }
+
+    @Override
+    @AfterEach
+    protected void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    @Test
+    public void test_getName() {
+        assertEquals("ollama", client.getName());
+    }
+
+    @Test
+    public void test_isAvailable_noApiUrl() {
+        client.setTestApiUrl("");
+        assertFalse(client.isAvailable());
+    }
+
+    @Test
+    public void test_isAvailable_nullApiUrl() {
+        client.setTestApiUrl(null);
+        assertFalse(client.isAvailable());
+    }
+
+    @Test
+    public void test_convertMessage_user() {
+        final LlmMessage message = LlmMessage.user("Hello, how are you?");
+        final Map<String, Object> result = client.convertMessage(message);
+
+        assertEquals("user", result.get("role"));
+        assertEquals("Hello, how are you?", result.get("content"));
+    }
+
+    @Test
+    public void test_convertMessage_assistant() {
+        final LlmMessage message = LlmMessage.assistant("I'm doing well, thank you!");
+        final Map<String, Object> result = client.convertMessage(message);
+
+        assertEquals("assistant", result.get("role"));
+        assertEquals("I'm doing well, thank you!", result.get("content"));
+    }
+
+    @Test
+    public void test_convertMessage_system() {
+        final LlmMessage message = LlmMessage.system("You are a helpful assistant.");
+        final Map<String, Object> result = client.convertMessage(message);
+
+        assertEquals("system", result.get("role"));
+        assertEquals("You are a helpful assistant.", result.get("content"));
+    }
+
+    @Test
+    public void test_buildRequestBody_defaultValues() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gemma3:4b", body.get("model"));
+        assertEquals(false, body.get("stream"));
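+
+        // Note (editorial): unlike the OpenAI request body, which keeps
+        // temperature/max_tokens at the top level, Ollama nests sampling
+        // parameters under "options" as temperature/num_predict.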
+        @SuppressWarnings("unchecked")
+        final Map<String, Object> options = (Map<String, Object>) body.get("options");
+        assertEquals(0.7, options.get("temperature"));
+        assertEquals(4096, options.get("num_predict"));
+
+        @SuppressWarnings("unchecked")
+        final List<Map<String, Object>> messages = (List<Map<String, Object>>) body.get("messages");
+        assertEquals(1, messages.size());
+        assertEquals("user", messages.get(0).get("role"));
+        assertEquals("Hello", messages.get(0).get("content"));
+    }
+
+    @Test
+    public void test_buildRequestBody_withRequestModel() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setModel("llama3:8b").addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("llama3:8b", body.get("model"));
+    }
+
+    @Test
+    public void test_buildRequestBody_withRequestTemperature() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setTemperature(0.5).addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        @SuppressWarnings("unchecked")
+        final Map<String, Object> options = (Map<String, Object>) body.get("options");
+        assertEquals(0.5, options.get("temperature"));
+    }
+
+    @Test
+    public void test_buildRequestBody_withRequestMaxTokens() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setMaxTokens(1000).addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        @SuppressWarnings("unchecked")
+        final Map<String, Object> options = (Map<String, Object>) body.get("options");
+        assertEquals(1000, options.get("num_predict"));
+    }
+
+    @Test
+    public void test_buildRequestBody_streaming() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, true);
+
+        assertEquals(true, body.get("stream"));
+    }
+
+    @Test
+    public void test_buildRequestBody_multipleMessages() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().addSystemMessage("You are a helpful assistant.")
+                .addUserMessage("What is the weather?").addAssistantMessage("I cannot access weather information.").addUserMessage("OK");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        @SuppressWarnings("unchecked")
+        final List<Map<String, Object>> messages = (List<Map<String, Object>>) body.get("messages");
+        assertEquals(4, messages.size());
+
+        assertEquals("system", messages.get(0).get("role"));
+        assertEquals("You are a helpful assistant.", messages.get(0).get("content"));
+
+        assertEquals("user", messages.get(1).get("role"));
+        assertEquals("What is the weather?", messages.get(1).get("content"));
+
+        assertEquals("assistant", messages.get(2).get("role"));
+        assertEquals("I cannot access weather information.", messages.get(2).get("content"));
+
+        assertEquals("user", messages.get(3).get("role"));
+        assertEquals("OK", messages.get(3).get("content"));
+    }
+
+    @Test
+    public void test_buildRequestBody_blankModelUsesDefault() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setModel("").addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gemma3:4b", body.get("model"));
+    }
+
+    @Test
+    public void test_buildRequestBody_nullModelUsesDefault() {
+        client.setTestModel("gemma3:4b");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setModel(null).addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gemma3:4b", body.get("model"));
+    }
+
+    @Test
+    public void test_init() {
+        client.setTestTimeout(30000);
+        client.init();
+        assertNotNull(client.getHttpClient());
+    }
+
+    @Test
+    public void test_getHttpClient_lazyInitialization() {
+        client.setTestTimeout(60000);
+        // First call should initialize the client
+        assertNotNull(client.getHttpClient());
+        // Second call should return the same client
+        assertNotNull(client.getHttpClient());
+    }
+
+    /**
+     * Testable subclass of OllamaLlmClient that allows setting configuration values
+     * directly without depending on FessConfig.
+     */
+    private static class TestableOllamaLlmClient extends OllamaLlmClient {
+        private String testApiUrl = "http://localhost:11434";
+        private String testModel = "gemma3:4b";
+        private int testTimeout = 60000;
+        private double testTemperature = 0.7;
+        private int testMaxTokens = 4096;
+
+        void setTestApiUrl(String apiUrl) {
+            this.testApiUrl = apiUrl;
+        }
+
+        void setTestModel(String model) {
+            this.testModel = model;
+        }
+
+        void setTestTimeout(int timeout) {
+            this.testTimeout = timeout;
+        }
+
+        void setTestTemperature(double temperature) {
+            this.testTemperature = temperature;
+        }
+
+        void setTestMaxTokens(int maxTokens) {
+            this.testMaxTokens = maxTokens;
+        }
+
+        @Override
+        protected String getApiUrl() {
+            return testApiUrl;
+        }
+
+        @Override
+        protected String getModel() {
+            return testModel;
+        }
+
+        @Override
+        protected int getTimeout() {
+            return testTimeout;
+        }
+
+        @Override
+        protected double getTemperature() {
+            return testTemperature;
+        }
+
+        @Override
+        protected int getMaxTokens() {
+            return testMaxTokens;
+        }
+    }
+}
diff --git a/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java b/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java
new file mode 100644
index 000000000..a1606f4d1
--- /dev/null
+++ b/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2012-2025 CodeLibs Project and the Others.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.codelibs.fess.llm.openai;
+
+import java.util.List;
+import java.util.Map;
+
+import org.codelibs.fess.llm.LlmChatRequest;
+import org.codelibs.fess.llm.LlmMessage;
+import org.codelibs.fess.unit.UnitFessTestCase;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
+
+public class OpenAiLlmClientTest extends UnitFessTestCase {
+
+    private TestableOpenAiLlmClient client;
+
+    @BeforeEach
+    @Override
+    protected void setUp(TestInfo testInfo) throws Exception {
+        super.setUp(testInfo);
+        client = new TestableOpenAiLlmClient();
+    }
+
+    @Override
+    @AfterEach
+    protected void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    @Test
+    public void test_getName() {
+        assertEquals("openai", client.getName());
+    }
+
+    @Test
+    public void test_isAvailable_noApiKey() {
+        client.setTestApiKey("");
+        client.setTestApiUrl("https://api.openai.com/v1");
+        assertFalse(client.isAvailable());
+    }
+
+    @Test
+    public void test_isAvailable_nullApiKey() {
+        client.setTestApiKey(null);
+        client.setTestApiUrl("https://api.openai.com/v1");
+        assertFalse(client.isAvailable());
+    }
+
+    @Test
+    public void test_isAvailable_noApiUrl() {
+        client.setTestApiKey("sk-test-key");
+        client.setTestApiUrl("");
+        assertFalse(client.isAvailable());
+    }
+
+    @Test
+    public void test_isAvailable_nullApiUrl() {
+        client.setTestApiKey("sk-test-key");
+        client.setTestApiUrl(null);
+        assertFalse(client.isAvailable());
+    }
+
+    @Test
+    public void test_convertMessage_user() {
+        final LlmMessage message = LlmMessage.user("Hello, how are you?");
+        final Map<String, Object> result = client.convertMessage(message);
+
+        assertEquals("user", result.get("role"));
+        assertEquals("Hello, how are you?", result.get("content"));
+    }
+
+    @Test
+    public void test_convertMessage_assistant() {
+        final LlmMessage message = LlmMessage.assistant("I'm doing well, thank you!");
+        final Map<String, Object> result = client.convertMessage(message);
+
+        assertEquals("assistant", result.get("role"));
+        assertEquals("I'm doing well, thank you!", result.get("content"));
+    }
+
+    @Test
+    public void test_convertMessage_system() {
+        final LlmMessage message = LlmMessage.system("You are a helpful assistant.");
+        final Map<String, Object> result = client.convertMessage(message);
+
+        assertEquals("system", result.get("role"));
+        assertEquals("You are a helpful assistant.", result.get("content"));
+    }
+
+    @Test
+    public void test_buildRequestBody_defaultValues() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gpt-4o", body.get("model"));
+        assertEquals(false, body.get("stream"));
+        assertEquals(0.7, body.get("temperature"));
+        assertEquals(4096, body.get("max_tokens"));
+
+        @SuppressWarnings("unchecked")
+        final List<Map<String, Object>> messages = (List<Map<String, Object>>) body.get("messages");
+        assertEquals(1, messages.size());
+        assertEquals("user", messages.get(0).get("role"));
+        assertEquals("Hello", messages.get(0).get("content"));
+    }
+
+    @Test
+    public void test_buildRequestBody_withRequestModel() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setModel("gpt-3.5-turbo").addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gpt-3.5-turbo", body.get("model"));
+    }
+
+    @Test
+    public void test_buildRequestBody_withRequestTemperature() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setTemperature(0.5).addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals(0.5, body.get("temperature"));
+    }
+
+    @Test
+    public void test_buildRequestBody_withRequestMaxTokens() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setMaxTokens(1000).addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals(1000, body.get("max_tokens"));
+    }
+
+    @Test
+    public void test_buildRequestBody_streaming() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, true);
+
+        assertEquals(true, body.get("stream"));
+    }
+
+    @Test
+    public void test_buildRequestBody_multipleMessages() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().addSystemMessage("You are a helpful assistant.")
+                .addUserMessage("What is the weather?").addAssistantMessage("I cannot access weather information.").addUserMessage("OK");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        @SuppressWarnings("unchecked")
+        final List<Map<String, Object>> messages = (List<Map<String, Object>>) body.get("messages");
+        assertEquals(4, messages.size());
+
+        assertEquals("system", messages.get(0).get("role"));
+        assertEquals("You are a helpful assistant.", messages.get(0).get("content"));
+
+        assertEquals("user", messages.get(1).get("role"));
+        assertEquals("What is the weather?", messages.get(1).get("content"));
+
+        assertEquals("assistant", messages.get(2).get("role"));
+        assertEquals("I cannot access weather information.", messages.get(2).get("content"));
+
+        assertEquals("user", messages.get(3).get("role"));
+        assertEquals("OK", messages.get(3).get("content"));
+    }
+
+    @Test
+    public void test_buildRequestBody_blankModelUsesDefault() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setModel("").addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gpt-4o", body.get("model"));
+    }
+
+    @Test
+    public void test_buildRequestBody_nullModelUsesDefault() {
+        client.setTestModel("gpt-4o");
+        client.setTestTemperature(0.7);
+        client.setTestMaxTokens(4096);
+
+        final LlmChatRequest request = new LlmChatRequest().setModel(null).addUserMessage("Hello");
+
+        final Map<String, Object> body = client.buildRequestBody(request, false);
+
+        assertEquals("gpt-4o", body.get("model"));
+    }
+
+    @Test
+    public void test_init() {
+        client.setTestTimeout(30000);
+        client.init();
+        assertNotNull(client.getHttpClient());
+    }
+
+    @Test
+    public void test_getHttpClient_lazyInitialization() {
+        client.setTestTimeout(60000);
+        // First call should initialize the client
+        assertNotNull(client.getHttpClient());
+        // Second call should return the same client
+        assertNotNull(client.getHttpClient());
+    }
+
+    /**
+     * Testable subclass of OpenAiLlmClient that allows setting configuration values
+     * directly without depending on FessConfig.
+     */
+    private static class TestableOpenAiLlmClient extends OpenAiLlmClient {
+        private String testApiKey = "";
+        private String testApiUrl = "https://api.openai.com/v1";
+        private String testModel = "gpt-4o";
+        private int testTimeout = 60000;
+        private double testTemperature = 0.7;
+        private int testMaxTokens = 4096;
+
+        void setTestApiKey(String apiKey) {
+            this.testApiKey = apiKey;
+        }
+
+        void setTestApiUrl(String apiUrl) {
+            this.testApiUrl = apiUrl;
+        }
+
+        void setTestModel(String model) {
+            this.testModel = model;
+        }
+
+        void setTestTimeout(int timeout) {
+            this.testTimeout = timeout;
+        }
+
+        void setTestTemperature(double temperature) {
+            this.testTemperature = temperature;
+        }
+
+        void setTestMaxTokens(int maxTokens) {
+            this.testMaxTokens = maxTokens;
+        }
+
+        @Override
+        protected String getApiKey() {
+            return testApiKey;
+        }
+
+        @Override
+        protected String getApiUrl() {
+            return testApiUrl;
+        }
+
+        @Override
+        protected String getModel() {
+            return testModel;
+        }
+
+        @Override
+        protected int getTimeout() {
+            return testTimeout;
+        }
+
+        @Override
+        protected double getTemperature() {
+            return testTemperature;
+        }
+
+        @Override
+        protected int getMaxTokens() {
+            return testMaxTokens;
+        }
+    }
+}

From 2cd30c1f00ffa770a11548c133d74ecd387a7953 Mon Sep 17 00:00:00 2001
From: Claude
Date: Mon, 12 Jan 2026 10:52:35 +0000
Subject: [PATCH 2/2] chore: change default OpenAI model to gpt-5-mini

Updates the default model for the OpenAI LLM client from gpt-4o to
gpt-5-mini in both configuration and test files.
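
Note for reviewers: the default only applies when the request itself does not
set a model; per the existing buildRequestBody tests, callers can still pin
one explicitly (illustrative):

    new LlmChatRequest().setModel("gpt-4o").addUserMessage("Hello");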
---
 src/main/resources/fess_config.properties     |  2 +-
 .../fess/llm/openai/OpenAiLlmClientTest.java  | 24 +++++++++----------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/src/main/resources/fess_config.properties b/src/main/resources/fess_config.properties
index 056a9173a..58bdacc5c 100644
--- a/src/main/resources/fess_config.properties
+++ b/src/main/resources/fess_config.properties
@@ -1583,7 +1583,7 @@ rag.llm.ollama.timeout=60000
 
 # OpenAI settings.
 rag.llm.openai.api.key=
-rag.llm.openai.model=gpt-4o
+rag.llm.openai.model=gpt-5-mini
 rag.llm.openai.api.url=https://api.openai.com/v1
 rag.llm.openai.timeout=60000
 
diff --git a/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java b/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java
index a1606f4d1..2f3e038a4 100644
--- a/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java
+++ b/src/test/java/org/codelibs/fess/llm/openai/OpenAiLlmClientTest.java
@@ -105,7 +105,7 @@ public void test_convertMessage_system() {
 
     @Test
     public void test_buildRequestBody_defaultValues() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -113,7 +113,7 @@ public void test_buildRequestBody_defaultValues() {
 
         final Map<String, Object> body = client.buildRequestBody(request, false);
 
-        assertEquals("gpt-4o", body.get("model"));
+        assertEquals("gpt-5-mini", body.get("model"));
         assertEquals(false, body.get("stream"));
         assertEquals(0.7, body.get("temperature"));
         assertEquals(4096, body.get("max_tokens"));
@@ -127,7 +127,7 @@ public void test_buildRequestBody_defaultValues() {
 
     @Test
     public void test_buildRequestBody_withRequestModel() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -140,7 +140,7 @@ public void test_buildRequestBody_withRequestModel() {
 
     @Test
     public void test_buildRequestBody_withRequestTemperature() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -153,7 +153,7 @@ public void test_buildRequestBody_withRequestTemperature() {
 
     @Test
     public void test_buildRequestBody_withRequestMaxTokens() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -166,7 +166,7 @@ public void test_buildRequestBody_withRequestMaxTokens() {
 
     @Test
     public void test_buildRequestBody_streaming() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -179,7 +179,7 @@ public void test_buildRequestBody_streaming() {
 
     @Test
     public void test_buildRequestBody_multipleMessages() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -207,7 +207,7 @@ public void test_buildRequestBody_multipleMessages() {
 
     @Test
     public void test_buildRequestBody_blankModelUsesDefault() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -215,12 +215,12 @@ public void test_buildRequestBody_blankModelUsesDefault() {
 
         final Map<String, Object> body = client.buildRequestBody(request, false);
 
-        assertEquals("gpt-4o", body.get("model"));
+        assertEquals("gpt-5-mini", body.get("model"));
     }
 
     @Test
     public void test_buildRequestBody_nullModelUsesDefault() {
-        client.setTestModel("gpt-4o");
+        client.setTestModel("gpt-5-mini");
         client.setTestTemperature(0.7);
         client.setTestMaxTokens(4096);
 
@@ -228,7 +228,7 @@ public void test_buildRequestBody_nullModelUsesDefault() {
 
         final Map<String, Object> body = client.buildRequestBody(request, false);
 
-        assertEquals("gpt-4o", body.get("model"));
+        assertEquals("gpt-5-mini", body.get("model"));
     }
 
     @Test
@@ -254,7 +254,7 @@ public void test_getHttpClient_lazyInitialization() {
     private static class TestableOpenAiLlmClient extends OpenAiLlmClient {
         private String testApiKey = "";
         private String testApiUrl = "https://api.openai.com/v1";
-        private String testModel = "gpt-4o";
+        private String testModel = "gpt-5-mini";
         private int testTimeout = 60000;
         private double testTemperature = 0.7;
         private int testMaxTokens = 4096;