== LangChain4j

AI and Large Language Model (LLM) integration using the https://github.com/langchain4j/langchain4j[LangChain4j] framework.

This module automates the instantiation and registration of `ChatModel` and `StreamingChatModel` components based on your application configuration. It supports built-in providers (OpenAI, Anthropic, Ollama, Jlama), seamless fallback routing for high availability, and custom provider registration.

=== Usage

1) Add the dependency:

[dependency, artifactId="jooby-langchain4j"]
.

2) Add the dependency for your chosen AI provider (e.g., OpenAI):

[dependency, groupId="dev.langchain4j", artifactId="langchain4j-open-ai", version="${langchain4j.version}"]
.

3) Configure your models in `application.conf`:

[source, hocon]
----
langchain4j {
  models {
    gpt-assistant {
      provider = "openai"
      api-key = ${OPENAI_API_KEY}
      model-name = "gpt-4o-mini"
      timeout = 30s
    }
  }
}
----

4) Install the module and require the model:

.Java
[source, java, role="primary"]
----
import io.jooby.langchain4j.LangChain4jModule;
import dev.langchain4j.model.chat.ChatModel;

{
  install(new LangChain4jModule()); <1>

  get("/chat", ctx -> {
    ChatModel ai = require(ChatModel.class); <2>
    String prompt = ctx.query("q").value("Tell me a joke");

    return ai.chat(prompt); <3>
  });
}
----

.Kotlin
[source, kt, role="secondary"]
----
import io.jooby.langchain4j.LangChain4jModule
import dev.langchain4j.model.chat.ChatModel

{
  install(LangChain4jModule()) <1>

  get("/chat") {
    val ai = require<ChatModel>() <2>
    val prompt = ctx.query("q").value("Tell me a joke")

    ai.chat(prompt) <3>
  }
}
----

<1> Install the LangChain4j module. It automatically parses the configuration and builds the models.
<2> Request the default `ChatModel` from the service registry.
<3> Execute the blocking chat request.
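
When several models are configured, you may want a specific one rather than the default. The following is a minimal sketch, assuming the module registers each model in the service registry under its configured name (here `gpt-assistant`, from step 3); check the module's API for the exact lookup mechanism:

[source, java]
----
import dev.langchain4j.model.chat.ChatModel;

{
  get("/assistant", ctx -> {
    // Assumption: models are also registered under their configuration
    // name, so they can be looked up as named services.
    ChatModel assistant = require(ChatModel.class, "gpt-assistant");
    return assistant.chat(ctx.query("q").value("Hello"));
  });
}
----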

=== Streaming Responses

If your provider supports streaming, the module automatically registers a `StreamingChatModel`, which pairs naturally with Jooby's Server-Sent Events (SSE) support.

.Java
[source, java, role="primary"]
----
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.chat.response.ChatResponse;

{
  sse("/chat/stream", sse -> {
    StreamingChatModel ai = require(StreamingChatModel.class);

    ai.chat("Write a long story", new StreamingChatResponseHandler() {
      @Override
      public void onPartialResponse(String token) {
        sse.send(token); <1>
      }

      @Override
      public void onCompleteResponse(ChatResponse response) {
        sse.close(); <2>
      }

      @Override
      public void onError(Throwable error) {
        sse.send("[ERROR] " + error.getMessage());
        sse.close();
      }
    });
  });
}
----

.Kotlin
[source, kt, role="secondary"]
----
import dev.langchain4j.model.chat.StreamingChatModel
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler
import dev.langchain4j.model.chat.response.ChatResponse

{
  sse("/chat/stream") { sse ->
    val ai = require<StreamingChatModel>()

    ai.chat("Write a long story", object : StreamingChatResponseHandler {
      override fun onPartialResponse(token: String) {
        sse.send(token) <1>
      }

      override fun onCompleteResponse(response: ChatResponse) {
        sse.close() <2>
      }

      override fun onError(error: Throwable) {
        sse.send("[ERROR] ${error.message}")
        sse.close()
      }
    })
  }
}
----

<1> Stream partial tokens back to the client as they are generated.
<2> Close the SSE connection when the model finishes.

=== Resilience & Fallbacks

Network timeouts and API rate limits happen. You can configure a chain of fallbacks to ensure high availability: if the primary model fails, the module automatically routes the request to the next configured fallback.

1) Configure the fallback chain in `application.conf`:

[source, hocon]
----
langchain4j.models {
  primary-agent {
    provider = "openai"
    api-key = ${OPENAI_API_KEY}
    fallback = ["local-failover"] <1>
  }

  local-failover {
    provider = "jlama"
    model-name = "tjake/Llama-3.2-1B-Instruct-JQ4"
  }
}
----
<1> Instructs the module to wrap `primary-agent` with a fallback decorator pointing to `local-failover`.

2) Attach a listener to monitor when failovers occur:

.Java
[source, java, role="primary"]
----
import io.jooby.langchain4j.LangChain4jModule;

{
  install(new LangChain4jModule()
      .failoverListener((modelName, error) -> {
        System.err.println("Model " + modelName + " failed: " + error.getMessage());
      })
  );
}
----

.Kotlin
[source, kt, role="secondary"]
----
import io.jooby.langchain4j.LangChain4jModule

{
  install(LangChain4jModule()
      .failoverListener { modelName, error ->
        System.err.println("Model $modelName failed: ${error.message}")
      }
  )
}
----
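
Call sites do not need to change when fallbacks are configured: the decorator is applied when the model is built. A minimal sketch, again assuming name-based registration:

[source, java]
----
import dev.langchain4j.model.chat.ChatModel;

{
  get("/resilient-chat", ctx -> {
    // "primary-agent" is already wrapped with the fallback decorator,
    // so a failing call is transparently retried against "local-failover".
    ChatModel ai = require(ChatModel.class, "primary-agent");
    return ai.chat(ctx.query("q").value("Summarize the news"));
  });
}
----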

=== Registering Custom Providers

The module includes built-in support for `openai`, `anthropic`, `ollama`, and `jlama`. To add support for an unlisted provider (e.g., Google Vertex AI), you can register a custom `ChatModelFactory`.

.Java
[source, java, role="primary"]
----
import io.jooby.langchain4j.LangChain4jModule;
import io.jooby.langchain4j.ChatModelFactory;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiStreamingChatModel;
import com.typesafe.config.Config;

{
  install(new LangChain4jModule()
      .register("vertex", new ChatModelFactory() { <1>
        @Override
        public ChatModel createChatModel(Config config) {
          return VertexAiGeminiChatModel.builder()
              .project(config.getString("project"))
              .location(config.getString("location"))
              .build();
        }

        @Override
        public StreamingChatModel createStreamingModel(Config config) {
          return VertexAiGeminiStreamingChatModel.builder() <2>
              .project(config.getString("project"))
              .location(config.getString("location"))
              .build();
        }
      })
  );
}
----

.Kotlin
[source, kt, role="secondary"]
----
import io.jooby.langchain4j.LangChain4jModule
import io.jooby.langchain4j.ChatModelFactory
import dev.langchain4j.model.chat.ChatModel
import dev.langchain4j.model.chat.StreamingChatModel
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel
import dev.langchain4j.model.vertexai.VertexAiGeminiStreamingChatModel
import com.typesafe.config.Config

{
  install(LangChain4jModule()
      .register("vertex", object : ChatModelFactory { <1>
        override fun createChatModel(config: Config): ChatModel {
          return VertexAiGeminiChatModel.builder()
              .project(config.getString("project"))
              .location(config.getString("location"))
              .build()
        }

        override fun createStreamingModel(config: Config): StreamingChatModel {
          return VertexAiGeminiStreamingChatModel.builder() <2>
              .project(config.getString("project"))
              .location(config.getString("location"))
              .build()
        }
      })
  )
}
----
<1> Register the factory under a provider name matching the `provider` key in your `.conf` file.
<2> `createStreamingModel` is an optional default method on the interface. Not all providers support streaming; if yours does not, simply do not override this method (it returns `null` by default).
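
A model definition can then reference the new provider by name. A hypothetical configuration, reusing the `project` and `location` keys read by the factory above (the values shown are placeholders):

[source, hocon]
----
langchain4j.models {
  gemini {
    provider = "vertex"
    project = "my-gcp-project"
    location = "us-central1"
  }
}
----

A chat-only provider would implement `createChatModel` and simply leave `createStreamingModel` alone.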

==== Accessing the Concrete Implementation

While you should generally interact with models via the standard `ChatModel` and `StreamingChatModel` interfaces, the module also registers the concrete implementation class in Jooby's service registry.

If you need provider-specific methods that are not exposed by the standard interfaces, you can require the concrete class directly:

.Java
[source, java, role="primary"]
----
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;

{
  get("/vertex-specific", ctx -> {
    // Retrieve the exact underlying implementation
    VertexAiGeminiChatModel gemini = require(VertexAiGeminiChatModel.class);
    // ...
  });
}
----

.Kotlin
[source, kt, role="secondary"]
----
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel

{
  get("/vertex-specific") {
    // Retrieve the exact underlying implementation
    val gemini = require<VertexAiGeminiChatModel>()
    // ...
  }
}
----