
OpenAI

AI-Mocks OpenAI is a specialized mock server for the OpenAI API, built using Mokksy.

MockOpenai is tested against the official openai-java SDK and popular JVM AI frameworks: LangChain4j and Spring AI.

Currently, it supports ChatCompletion and Streaming ChatCompletion requests.

Add Dependency

Include the library in your test dependencies (Maven or Gradle).

Gradle:

implementation("me.kpavlov.aimocks:ai-mocks-openai-jvm:$latestVersion")
Maven:

<dependency>
  <groupId>me.kpavlov.aimocks</groupId>
  <artifactId>ai-mocks-openai-jvm</artifactId>
  <version>[LATEST_VERSION]</version>
</dependency>

Set up a mock server and define mock responses:

val openai = MockOpenai(verbose = true)
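The mock starts a local HTTP server; its address is available via openai.baseUrl(), which the examples below pass to their client builders. A quick sanity-check sketch:

// Print the endpoint the mock is listening on (baseUrl() is also used by the client builders below)
println("Mock OpenAI endpoint: ${openai.baseUrl()}")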

Let's simulate the OpenAI Chat Completions API:

// Define mock response
openai.completion {
  temperature = 0.7
  seed = 42
  model = "gpt-4o-mini"
  maxTokens = 100
  topP = 0.95
  systemMessageContains("helpful assistant")
  userMessageContains("say 'Hello!'")
} responds {
  assistantContent = "Hello"
  finishReason = "stop"
  delay = 200.milliseconds // delay before answer
}

// OpenAI client setup
val client: OpenAIClient =
  OpenAIOkHttpClient
    .builder()
    .apiKey("dummy-api-key")
    .baseUrl(openai.baseUrl()) // connect to mock OpenAI
    .responseValidation(true)
    .build()

// Use the mock endpoint
val params =
  ChatCompletionCreateParams
    .builder()
    .temperature(0.7)
    .maxCompletionTokens(100)
    .topP(0.95)
    .seed(42)
    .messages(
      listOf(
        ChatCompletionMessageParam.ofSystem(
          ChatCompletionSystemMessageParam
            .builder()
            .content(
              "You are a helpful assistant.",
            ).build(),
        ),
        ChatCompletionMessageParam.ofUser(
          ChatCompletionUserMessageParam
            .builder()
            .content("Just say 'Hello!' and nothing else")
            .build(),
        ),
      ),
    ).model("gpt-4o-mini")
    .build()

val result: ChatCompletion =
  client
    .chat()
    .completions()
    .create(params)

println(result)
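In a test, the mocked reply can then be asserted on the returned ChatCompletion. A minimal sketch, assuming kotest matchers (shouldBe) on the classpath, as used in the Spring AI example further below; the accessor chain follows the openai-java ChatCompletion model:

// Verify the client received the mocked content (content() is an Optional in openai-java)
result.choices().first().message().content().orElse("") shouldBe "Hello"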

With AI-Mocks you can also test negative scenarios, such as error responses and delays.

openai.completion {
  temperature = 0.7
  seed = 42
  model = "gpt-4o-mini"
  maxTokens = 100
  topP = 0.95
  systemMessageContains("helpful assistant")
  userMessageContains("say 'Hello!'")
} respondsError {
  body =
    // language=json
    """
    {
      "caramba": "Arrr, blast me barnacles! This be not what ye expect! 🏴‍☠️"
    }
    """.trimIndent()
  delay = 1.seconds
  httpStatus = HttpStatusCode.PaymentRequired
}
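On the client side the mocked 402 surfaces as a failed call after the configured delay. A minimal sketch, reusing the client and params from the first example for illustration; the exact exception type depends on the SDK, so it is handled generically here:

// The request matching this mock fails with HTTP 402 (Payment Required) after ~1 second
val failure = runCatching { client.chat().completions().create(params) }.exceptionOrNull()
println("Expected failure from mock: $failure")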

You may also use the LangChain4j Kotlin extensions:

val model: OpenAiChatModel =
  OpenAiChatModel
    .builder()
    .apiKey("dummy-api-key")
    .baseUrl(openai.baseUrl())
    .build()

val result =
  model.chatAsync {
    parameters =
      OpenAiChatRequestParameters
        .builder()
        .temperature(0.7)
        .modelName("gpt-4o-mini")
        .maxCompletionTokens(100)
        .topP(0.95)
        .seed(42)
        .build()
    messages += userMessage("Say Hello")
  }

println(result)
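The returned ChatResponse can be verified the same way. A small sketch, assuming a matching completion mock is configured (as in the first example, responding with "Hello") and kotest matchers are available:

// The LangChain4j ChatResponse exposes the assistant message produced by the mock
result.aiMessage().text() shouldBe "Hello"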

Mock streaming responses easily with flow support:

// configure mock openai
openai.completion {
  temperature = 0.7
  model = "gpt-4o-mini"
  maxTokens = 100
  topP = 0.95
  topK = 40
  seed = 42
  userMessageContains("What is in the sea?")
} respondsStream {
  responseFlow =
    flow {
      emit("Yellow")
      emit(" submarine")
    }
  finishReason = "stop"
  delay = 60.milliseconds
  delayBetweenChunks = 15.milliseconds

  // send "[DONE]" as last message to finish the stream in openai4j
  sendDone = true
}

// create streaming model (a client)
val model: OpenAiStreamingChatModel =
  OpenAiStreamingChatModel
    .builder()
    .apiKey("foo")
    .baseUrl(openai.baseUrl())
    .build()

// call streaming model
model
  .chatFlow {
    parameters =
      ChatRequestParameters
        .builder()
        .temperature(0.7)
        .modelName("gpt-4o-mini")
        .maxTokens(100)
        .topP(0.95)
        .topK(40)
        .seed(42)
        .build()
    messages += userMessage("What is in the sea?")
  }.collect {
    when (it) {
      is PartialResponse -> {
        println("token = ${it.token}")
      }

      is CompleteResponse -> {
        println("Completed: $it")
      }

      else -> {
        println("Something else = $it")
      }
    }
  }
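To assert on the full streamed text rather than print tokens, the partial responses can be accumulated while collecting. A minimal sketch, repeating the same request parameters so the mock above still matches (assuming it is not limited to a single invocation) and assuming kotest matchers:

// Collect streamed tokens into one string and verify the complete mocked message
val streamed = StringBuilder()
model
  .chatFlow {
    parameters =
      ChatRequestParameters
        .builder()
        .temperature(0.7)
        .modelName("gpt-4o-mini")
        .maxTokens(100)
        .topP(0.95)
        .topK(40)
        .seed(42)
        .build()
    messages += userMessage("What is in the sea?")
  }.collect {
    if (it is PartialResponse) streamed.append(it.token)
  }
streamed.toString() shouldBe "Yellow submarine"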

To test the Spring AI integration:

// create mock server
val openai = MockOpenai(verbose = true)

// create Spring-AI client
val chatClient =
  ChatClient
    .builder(
      org.springframework.ai.openai.OpenAiChatModel
        .builder()
        .openAiApi(
          OpenAiApi
            .builder()
            .apiKey("demo-key")
            .baseUrl(openai.baseUrl())
            .build(),
        ).build(),
    ).build()

// Set up a mock for the LLM call
openai.completion {
  temperature = 0.7
  seed = 42
  model = "gpt-4o-mini"
  maxTokens = 100
  topP = 0.95
  topK = 40
  systemMessageContains("helpful pirate")
  userMessageContains("say 'Hello!'")
} responds {
  assistantContent = "Ahoy there, matey! Hello!"
  finishReason = "stop"
  delay = 200.milliseconds
}

// Configure Spring-AI client call
val response =
  chatClient
    .prompt()
    .system("You are a helpful pirate")
    .user("Just say 'Hello!'")
    .options<OpenAiChatOptions>(
      OpenAiChatOptions
        .builder()
        .maxCompletionTokens(100)
        .temperature(0.7)
        .topP(0.95)
        .topK(40)
        .model("gpt-4o-mini")
        .seed(42)
        .build(),
    )
    // Make a call
    .call()
    .chatResponse()

// Verify the response
response?.result shouldNotBe null
response?.result?.apply {
  metadata.finishReason shouldBe "STOP"
  output?.text shouldBe "Ahoy there, matey! Hello!"
}

See the integration tests for more examples.