Skip to content

Commit 42a5233

Browse files
authored
Merge pull request #3881 from jooby-project/3880
feat(langchain4j): add Jooby extension for LangChain4j with resilient…
2 parents ae7db65 + 450d984 commit 42a5233

File tree

17 files changed

+1444
-1
lines changed

17 files changed

+1444
-1
lines changed
Lines changed: 298 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,298 @@
1+
== LangChain4j
2+
3+
AI and Large Language Model (LLM) integration using the https://github.com/langchain4j/langchain4j[LangChain4j] framework.
4+
5+
This module automates the instantiation and registration of `ChatModel` and `StreamingChatModel` components based on your application configuration. It supports built-in providers (OpenAI, Anthropic, Ollama, Jlama), seamless fallback routing for high availability, and custom provider registration.
6+
7+
=== Usage
8+
9+
1) Add the dependency:
10+
11+
[dependency, artifactId="jooby-langchain4j"]
12+
.
13+
14+
2) Add the dependency for your chosen AI provider (e.g., OpenAI):
15+
16+
[dependency, groupId="dev.langchain4j", artifactId="langchain4j-open-ai", version="${langchain4j.version}"]
17+
.
18+
19+
3) Configure your models in `application.conf`:
20+
21+
[source, hocon]
22+
----
23+
langchain4j {
24+
models {
25+
gpt-assistant {
26+
provider = "openai"
27+
api-key = ${OPENAI_API_KEY}
28+
model-name = "gpt-4o-mini"
29+
timeout = 30s
30+
}
31+
}
32+
}
33+
----
34+
35+
4) Install the module and require the model:
36+
37+
.Java
38+
[source, java, role="primary"]
39+
----
40+
import io.jooby.langchain4j.LangChain4jModule;
41+
import dev.langchain4j.model.chat.ChatModel;
42+
43+
{
44+
install(new LangChain4jModule()); <1>
45+
46+
get("/chat", ctx -> {
47+
ChatModel ai = require(ChatModel.class); <2>
48+
String prompt = ctx.query("q").value("Tell me a joke");
49+
50+
return ai.chat(prompt); <3>
51+
});
52+
}
53+
----
54+
55+
.Kotlin
56+
[source, kt, role="secondary"]
57+
----
58+
import io.jooby.langchain4j.LangChain4jModule
59+
import dev.langchain4j.model.chat.ChatModel
60+
61+
{
62+
install(LangChain4jModule()) <1>
63+
64+
get("/chat") {
65+
val ai = require<ChatModel>() <2>
66+
val prompt = ctx.query("q").value("Tell me a joke")
67+
68+
ai.chat(prompt) <3>
69+
}
70+
}
71+
----
72+
73+
<1> Install the LangChain4j module. It will automatically parse the configuration and build the models.
74+
<2> Request the default `ChatModel` from the service registry.
75+
<3> Execute the blocking chat request.
76+
77+
=== Streaming Responses
78+
79+
If your provider supports streaming, the module automatically registers a `StreamingChatModel` which pairs perfectly with Jooby's Server-Sent Events (SSE).
80+
81+
.Java
82+
[source, java, role="primary"]
83+
----
84+
import dev.langchain4j.model.chat.StreamingChatModel;
85+
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
86+
import dev.langchain4j.model.chat.response.ChatResponse;
87+
88+
{
89+
sse("/chat/stream", sse -> {
90+
StreamingChatModel ai = require(StreamingChatModel.class);
91+
92+
ai.chat("Write a long story", new StreamingChatResponseHandler() {
93+
@Override
94+
public void onPartialResponse(String token) {
95+
sse.send(token); <1>
96+
}
97+
98+
@Override
99+
public void onCompleteResponse(ChatResponse response) {
100+
sse.close(); <2>
101+
}
102+
103+
@Override
104+
public void onError(Throwable error) {
105+
sse.send("[ERROR] " + error.getMessage());
106+
sse.close();
107+
}
108+
});
109+
});
110+
}
111+
----
112+
113+
.Kotlin
114+
[source, kt, role="secondary"]
115+
----
116+
import dev.langchain4j.model.chat.StreamingChatModel
117+
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler
118+
import dev.langchain4j.model.chat.response.ChatResponse
119+
120+
{
121+
sse("/chat/stream") { sse ->
122+
val ai = require<StreamingChatModel>()
123+
124+
ai.chat("Write a long story", object : StreamingChatResponseHandler {
125+
override fun onPartialResponse(token: String) {
126+
sse.send(token) <1>
127+
}
128+
129+
override fun onCompleteResponse(response: ChatResponse) {
130+
sse.close() <2>
131+
}
132+
133+
override fun onError(error: Throwable) {
134+
sse.send("[ERROR] ${error.message}")
135+
sse.close()
136+
}
137+
})
138+
}
139+
}
140+
----
141+
142+
<1> Stream partial tokens back to the client as they are generated.
143+
<2> Close the SSE connection when the model finishes.
144+
145+
=== Resilience & Fallbacks
146+
147+
Network timeouts and API rate limits happen. You can configure a chain of fallbacks to ensure high availability. If the primary model fails, the module automatically routes the request to the next configured fallback.
148+
149+
1) Configure the fallback chain in `application.conf`:
150+
151+
[source, hocon]
152+
----
153+
langchain4j.models {
154+
primary-agent {
155+
provider = "openai"
156+
api-key = ${OPENAI_API_KEY}
157+
fallback = ["local-failover"] <1>
158+
}
159+
160+
local-failover {
161+
provider = "jlama"
162+
model-name = "tjake/Llama-3.2-1B-Instruct-JQ4"
163+
}
164+
}
165+
----
166+
<1> Instructs the module to wrap `primary-agent` with a fallback decorator pointing to `local-failover`.
167+
168+
2) Attach a listener to monitor when failovers occur:
169+
170+
.Java
171+
[source, java, role="primary"]
172+
----
173+
import io.jooby.langchain4j.LangChain4jModule;
174+
175+
{
176+
install(new LangChain4jModule()
177+
.failoverListener((modelName, error) -> {
178+
System.err.println("Model " + modelName + " failed: " + error.getMessage());
179+
})
180+
);
181+
}
182+
----
183+
184+
.Kotlin
185+
[source, kt, role="secondary"]
186+
----
187+
import io.jooby.langchain4j.LangChain4jModule
188+
189+
{
190+
install(LangChain4jModule()
191+
.failoverListener { modelName, error ->
192+
println("Model $modelName failed: ${error.message}")
193+
}
194+
)
195+
}
196+
----
197+
198+
=== Registering Custom Providers
199+
200+
The module includes built-in support for `openai`, `anthropic`, `ollama`, and `jlama`. To add support for an unlisted provider (e.g., Google Vertex AI), you can register a custom `ChatModelFactory`.
201+
202+
.Java
203+
[source, java, role="primary"]
204+
----
205+
import io.jooby.langchain4j.LangChain4jModule;
206+
import io.jooby.langchain4j.ChatModelFactory;
207+
import dev.langchain4j.model.chat.ChatModel;
208+
import dev.langchain4j.model.chat.StreamingChatModel;
209+
import com.typesafe.config.Config;
210+
211+
{
212+
install(new LangChain4jModule()
213+
.register("vertex", new ChatModelFactory() { <1>
214+
@Override
215+
public ChatModel createChatModel(Config config) {
216+
return VertexAiGeminiChatModel.builder()
217+
.project(config.getString("project"))
218+
.location(config.getString("location"))
219+
.build();
220+
}
221+
222+
@Override
223+
public StreamingChatModel createStreamingModel(Config config) {
224+
return VertexAiGeminiStreamingChatModel.builder() <2>
225+
.project(config.getString("project"))
226+
.location(config.getString("location"))
227+
.build();
228+
}
229+
})
230+
);
231+
}
232+
----
233+
234+
.Kotlin
235+
[source, kt, role="secondary"]
236+
----
237+
import io.jooby.langchain4j.LangChain4jModule
238+
import io.jooby.langchain4j.ChatModelFactory
239+
import dev.langchain4j.model.chat.ChatModel
240+
import dev.langchain4j.model.chat.StreamingChatModel
241+
import com.typesafe.config.Config
242+
243+
{
244+
install(LangChain4jModule()
245+
.register("vertex", object : ChatModelFactory { <1>
246+
override fun createChatModel(config: Config): ChatModel {
247+
return VertexAiGeminiChatModel.builder()
248+
.project(config.getString("project"))
249+
.location(config.getString("location"))
250+
.build()
251+
}
252+
253+
override fun createStreamingModel(config: Config): StreamingChatModel {
254+
return VertexAiGeminiStreamingChatModel.builder() <2>
255+
.project(config.getString("project"))
256+
.location(config.getString("location"))
257+
.build()
258+
}
259+
})
260+
)
261+
}
262+
----
263+
<1> Register the custom provider name matching the `provider` key in your `.conf` file.
264+
<2> `createStreamingModel` is an optional `default` method on the interface. Not all providers support streaming; if your chosen provider does not, simply do not override this method (it returns `null` by default).
265+
266+
==== Accessing the Concrete Implementation
267+
268+
While you should generally interact with models via the standard `ChatModel` and `StreamingChatModel` interfaces, the module also registers the concrete implementation class in Jooby's Service Registry.
269+
270+
If you need to access provider-specific methods that are not part of the standard interfaces, you can require the concrete class directly:
271+
272+
.Java
273+
[source, java, role="primary"]
274+
----
275+
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
276+
277+
{
278+
get("/vertex-specific", ctx -> {
279+
// Retrieve the exact underlying implementation
280+
VertexAiGeminiChatModel gemini = require(VertexAiGeminiChatModel.class);
281+
// ...
282+
});
283+
}
284+
----
285+
286+
.Kotlin
287+
[source, kt, role="secondary"]
288+
----
289+
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel
290+
291+
{
292+
get("/vertex-specific") {
293+
// Retrieve the exact underlying implementation
294+
val gemini = require<VertexAiGeminiChatModel>()
295+
// ...
296+
}
297+
}
298+
----

docs/asciidoc/modules/modules.adoc

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@ Unlike other frameworks, Jooby modules **do not** create new layers of abstracti
66

77
Modules are distributed as separate dependencies. Below is the catalog of officially supported Jooby modules:
88

9+
==== AI
10+
* link:{uiVersion}/modules/langchain4j[LangChain4j]: Supercharge your Java application with the power of LLMs.
11+
912
==== Cloud
1013
* link:{uiVersion}/modules/awssdkv2[AWS-SDK v2]: Amazon Web Service module SDK 2.
1114
* link:{uiVersion}/modules/aws[AWS SDK v1]: Amazon Web Service module SDK 1.

modules/jooby-langchain4j/pom.xml

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
<?xml version="1.0" encoding="UTF-8"?>
2+
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
3+
4+
<modelVersion>4.0.0</modelVersion>
5+
6+
<parent>
7+
<groupId>io.jooby</groupId>
8+
<artifactId>modules</artifactId>
9+
<version>4.0.17-SNAPSHOT</version>
10+
</parent>
11+
<artifactId>jooby-langchain4j</artifactId>
12+
<name>jooby-langchain4j</name>
13+
14+
<dependencies>
15+
<dependency>
16+
<groupId>io.jooby</groupId>
17+
<artifactId>jooby</artifactId>
18+
<version>${jooby.version}</version>
19+
</dependency>
20+
21+
<dependency>
22+
<groupId>dev.langchain4j</groupId>
23+
<artifactId>langchain4j-core</artifactId>
24+
</dependency>
25+
26+
<dependency>
27+
<groupId>dev.langchain4j</groupId>
28+
<artifactId>langchain4j-open-ai</artifactId>
29+
<optional>true</optional>
30+
</dependency>
31+
32+
<dependency>
33+
<groupId>dev.langchain4j</groupId>
34+
<artifactId>langchain4j-anthropic</artifactId>
35+
<optional>true</optional>
36+
</dependency>
37+
38+
<dependency>
39+
<groupId>dev.langchain4j</groupId>
40+
<artifactId>langchain4j-ollama</artifactId>
41+
<optional>true</optional>
42+
</dependency>
43+
44+
<dependency>
45+
<groupId>dev.langchain4j</groupId>
46+
<artifactId>langchain4j-jlama</artifactId>
47+
<optional>true</optional>
48+
</dependency>
49+
50+
<!-- Test dependencies -->
51+
<dependency>
52+
<groupId>org.junit.jupiter</groupId>
53+
<artifactId>junit-jupiter-engine</artifactId>
54+
<scope>test</scope>
55+
</dependency>
56+
57+
<dependency>
58+
<groupId>org.mockito</groupId>
59+
<artifactId>mockito-core</artifactId>
60+
<scope>test</scope>
61+
</dependency>
62+
63+
<dependency>
64+
<groupId>org.jacoco</groupId>
65+
<artifactId>org.jacoco.agent</artifactId>
66+
<classifier>runtime</classifier>
67+
<scope>test</scope>
68+
</dependency>
69+
</dependencies>
70+
71+
<dependencyManagement>
72+
<dependencies>
73+
<dependency>
74+
<groupId>dev.langchain4j</groupId>
75+
<artifactId>langchain4j-bom</artifactId>
76+
<version>1.12.2</version>
77+
<type>pom</type>
78+
<scope>import</scope>
79+
</dependency>
80+
</dependencies>
81+
</dependencyManagement>
82+
</project>

0 commit comments

Comments
 (0)