diff --git a/docs/asciidoc/modules/langchain4j.adoc b/docs/asciidoc/modules/langchain4j.adoc new file mode 100644 index 0000000000..a8a6abafdb --- /dev/null +++ b/docs/asciidoc/modules/langchain4j.adoc @@ -0,0 +1,298 @@ +== LangChain4j + +AI and Large Language Model (LLM) integration using the https://github.com/langchain4j/langchain4j[LangChain4j] framework. + +This module automates the instantiation and registration of `ChatModel` and `StreamingChatModel` components based on your application configuration. It supports built-in providers (OpenAI, Anthropic, Ollama, Jlama), seamless fallback routing for high availability, and custom provider registration. + +=== Usage + +1) Add the dependency: + +[dependency, artifactId="jooby-langchain4j"] +. + +2) Add the dependency for your chosen AI provider (e.g., OpenAI): + +[dependency, groupId="dev.langchain4j", artifactId="langchain4j-open-ai", version="${langchain4j.version}"] +. + +3) Configure your models in `application.conf`: + +[source, hocon] +---- +langchain4j { + models { + gpt-assistant { + provider = "openai" + api-key = ${OPENAI_API_KEY} + model-name = "gpt-4o-mini" + timeout = 30s + } + } +} +---- + +4) Install the module and require the model: + +.Java +[source, java, role="primary"] +---- +import io.jooby.langchain4j.LangChain4jModule; +import dev.langchain4j.model.chat.ChatModel; + +{ + install(new LangChain4jModule()); <1> + + get("/chat", ctx -> { + ChatModel ai = require(ChatModel.class); <2> + String prompt = ctx.query("q").value("Tell me a joke"); + + return ai.chat(prompt); <3> + }); +} +---- + +.Kotlin +[source, kt, role="secondary"] +---- +import io.jooby.langchain4j.LangChain4jModule +import dev.langchain4j.model.chat.ChatModel + +{ + install(LangChain4jModule()) <1> + + get("/chat") { + val ai = require() <2> + val prompt = ctx.query("q").value("Tell me a joke") + + ai.chat(prompt) <3> + } +} +---- + +<1> Install the LangChain4j module. 
It will automatically parse the configuration and build the models. +<2> Request the default `ChatModel` from the service registry. +<3> Execute the blocking chat request. + +=== Streaming Responses + +If your provider supports streaming, the module automatically registers a `StreamingChatModel` which pairs perfectly with Jooby's Server-Sent Events (SSE). + +.Java +[source, java, role="primary"] +---- +import dev.langchain4j.model.chat.StreamingChatModel; +import dev.langchain4j.model.chat.response.StreamingChatResponseHandler; +import dev.langchain4j.model.chat.response.ChatResponse; + +{ + sse("/chat/stream", sse -> { + StreamingChatModel ai = require(StreamingChatModel.class); + + ai.chat("Write a long story", new StreamingChatResponseHandler() { + @Override + public void onPartialResponse(String token) { + sse.send(token); <1> + } + + @Override + public void onCompleteResponse(ChatResponse response) { + sse.close(); <2> + } + + @Override + public void onError(Throwable error) { + sse.send("[ERROR] " + error.getMessage()); + sse.close(); + } + }); + }); +} +---- + +.Kotlin +[source, kt, role="secondary"] +---- +import dev.langchain4j.model.chat.StreamingChatModel +import dev.langchain4j.model.chat.response.StreamingChatResponseHandler +import dev.langchain4j.model.chat.response.ChatResponse + +{ + sse("/chat/stream") { sse -> + val ai = require() + + ai.chat("Write a long story", object : StreamingChatResponseHandler { + override fun onPartialResponse(token: String) { + sse.send(token) <1> + } + + override fun onCompleteResponse(response: ChatResponse) { + sse.close() <2> + } + + override fun onError(error: Throwable) { + sse.send("[ERROR] ${error.message}") + sse.close() + } + }) + } +} +---- + +<1> Stream partial tokens back to the client as they are generated. +<2> Close the SSE connection when the model finishes. + +=== Resilience & Fallbacks + +Network timeouts and API rate limits happen. You can configure a chain of fallbacks to ensure high availability. 
If the primary model fails, the module automatically routes the request to the next configured fallback. + +1) Configure the fallback chain in `application.conf`: + +[source, hocon] +---- +langchain4j.models { + primary-agent { + provider = "openai" + api-key = ${OPENAI_API_KEY} + fallback = ["local-failover"] <1> + } + + local-failover { + provider = "jlama" + model-name = "tjake/Llama-3.2-1B-Instruct-JQ4" + } +} +---- +<1> Instructs the module to wrap `primary-agent` with a fallback decorator pointing to `local-failover`. + +2) Attach a listener to monitor when failovers occur: + +.Java +[source, java, role="primary"] +---- +import io.jooby.langchain4j.LangChain4jModule; + +{ + install(new LangChain4jModule() + .failoverListener((modelName, error) -> { + System.err.println("Model " + modelName + " failed: " + error.getMessage()); + }) + ); +} +---- + +.Kotlin +[source, kt, role="secondary"] +---- +import io.jooby.langchain4j.LangChain4jModule + +{ + install(LangChain4jModule() + .failoverListener { modelName, error -> + println("Model $modelName failed: ${error.message}") + } + ) +} +---- + +=== Registering Custom Providers + +The module includes built-in support for `openai`, `anthropic`, `ollama`, and `jlama`. To add support for an unlisted provider (e.g., Google Vertex AI), you can register a custom `ChatModelFactory`. 
+ +.Java +[source, java, role="primary"] +---- +import io.jooby.langchain4j.LangChain4jModule; +import io.jooby.langchain4j.ChatModelFactory; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.StreamingChatModel; +import com.typesafe.config.Config; + +{ + install(new LangChain4jModule() + .register("vertex", new ChatModelFactory() { <1> + @Override + public ChatModel createChatModel(Config config) { + return VertexAiGeminiChatModel.builder() + .project(config.getString("project")) + .location(config.getString("location")) + .build(); + } + + @Override + public StreamingChatModel createStreamingModel(Config config) { + return VertexAiGeminiStreamingChatModel.builder() <2> + .project(config.getString("project")) + .location(config.getString("location")) + .build(); + } + }) + ); +} +---- + +.Kotlin +[source, kt, role="secondary"] +---- +import io.jooby.langchain4j.LangChain4jModule +import io.jooby.langchain4j.ChatModelFactory +import dev.langchain4j.model.chat.ChatModel +import dev.langchain4j.model.chat.StreamingChatModel +import com.typesafe.config.Config + +{ + install(LangChain4jModule() + .register("vertex", object : ChatModelFactory { <1> + override fun createChatModel(config: Config): ChatModel { + return VertexAiGeminiChatModel.builder() + .project(config.getString("project")) + .location(config.getString("location")) + .build() + } + + override fun createStreamingModel(config: Config): StreamingChatModel { + return VertexAiGeminiStreamingChatModel.builder() <2> + .project(config.getString("project")) + .location(config.getString("location")) + .build() + } + }) + ) +} +---- +<1> Register the custom provider name matching the `provider` key in your `.conf` file. +<2> `createStreamingModel` is implemented as an optional default method in the interface. Not all providers support streaming. If your chosen provider does not support it, simply do not override this method (it returns `null` by default). 
+ +==== Accessing the Concrete Implementation + +While you should generally interact with models via the standard `ChatModel` and `StreamingChatModel` interfaces, the module also registers the exact class implementation in Jooby's Service Registry. + +If you need to access provider-specific methods on the actual builder output, you can require the concrete class directly: + +.Java +[source, java, role="primary"] +---- +import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; + +{ + get("/vertex-specific", ctx -> { + // Retrieve the exact underlying implementation + VertexAiGeminiChatModel gemini = require(VertexAiGeminiChatModel.class); + // ... + }); +} +---- + +.Kotlin +[source, kt, role="secondary"] +---- +import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel + +{ + get("/vertex-specific") { + // Retrieve the exact underlying implementation + val gemini = require() + // ... + } +} +---- diff --git a/docs/asciidoc/modules/modules.adoc b/docs/asciidoc/modules/modules.adoc index 8029f4b8be..3458e56617 100644 --- a/docs/asciidoc/modules/modules.adoc +++ b/docs/asciidoc/modules/modules.adoc @@ -6,6 +6,9 @@ Unlike other frameworks, Jooby modules **do not** create new layers of abstracti Modules are distributed as separate dependencies. Below is the catalog of officially supported Jooby modules: +==== AI + * link:{uiVersion}/modules/langchain4j[LangChain4j]: Supercharge your Java application with the power of LLMs. + ==== Cloud * link:{uiVersion}/modules/awssdkv2[AWS-SDK v2]: Amazon Web Service module SDK 2. * link:{uiVersion}/modules/aws[AWS SDK v1]: Amazon Web Service module SDK 1. 
diff --git a/modules/jooby-langchain4j/pom.xml b/modules/jooby-langchain4j/pom.xml new file mode 100644 index 0000000000..20dd830ecf --- /dev/null +++ b/modules/jooby-langchain4j/pom.xml @@ -0,0 +1,82 @@ + + + + 4.0.0 + + + io.jooby + modules + 4.0.17-SNAPSHOT + + jooby-langchain4j + jooby-langchain4j + + + + io.jooby + jooby + ${jooby.version} + + + + dev.langchain4j + langchain4j-core + + + + dev.langchain4j + langchain4j-open-ai + true + + + + dev.langchain4j + langchain4j-anthropic + true + + + + dev.langchain4j + langchain4j-ollama + true + + + + dev.langchain4j + langchain4j-jlama + true + + + + + org.junit.jupiter + junit-jupiter-engine + test + + + + org.mockito + mockito-core + test + + + + org.jacoco + org.jacoco.agent + runtime + test + + + + + + + dev.langchain4j + langchain4j-bom + 1.12.2 + pom + import + + + + diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/BuiltInModel.java b/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/BuiltInModel.java new file mode 100644 index 0000000000..a144e41942 --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/BuiltInModel.java @@ -0,0 +1,177 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.internal.langchain4j; + +import java.time.Duration; + +import com.typesafe.config.Config; +import dev.langchain4j.model.anthropic.AnthropicChatModel; +import dev.langchain4j.model.anthropic.AnthropicStreamingChatModel; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.StreamingChatModel; +import dev.langchain4j.model.jlama.JlamaChatModel; +import dev.langchain4j.model.jlama.JlamaStreamingChatModel; +import dev.langchain4j.model.ollama.OllamaChatModel; +import dev.langchain4j.model.ollama.OllamaStreamingChatModel; +import dev.langchain4j.model.openai.OpenAiChatModel; +import 
dev.langchain4j.model.openai.OpenAiStreamingChatModel; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.jooby.langchain4j.ChatModelFactory; + +/** + * Enumeration of built-in LangChain4j model providers supported by the Jooby extension. Each + * constant implements {@link ChatModelFactory} to provide provider-specific instantiation logic. + */ +public enum BuiltInModel implements ChatModelFactory { + OPENAI { + @Override + public ChatModel createChatModel(@NonNull Config config) { + check("dev.langchain4j.model.openai.OpenAiChatModel", "langchain4j-open-ai"); + return OpenAiChatModel.builder() + .apiKey(config.getString("api-key")) + .modelName(config.hasPath("model-name") ? config.getString("model-name") : "gpt-4o-mini") + .timeout(getTimeout(config, Duration.ofSeconds(60))) + .temperature(getTemp(config)) + .build(); + } + + @Override + public StreamingChatModel createStreamingModel(@NonNull Config config) { + return OpenAiStreamingChatModel.builder() + .apiKey(config.getString("api-key")) + .modelName(config.hasPath("model-name") ? config.getString("model-name") : "gpt-4o-mini") + .timeout(getStreamTimeout(config)) + .temperature(getTemp(config)) + .build(); + } + }, + + ANTHROPIC { + @Override + public ChatModel createChatModel(@NonNull Config config) { + check("dev.langchain4j.model.anthropic.AnthropicChatModel", "langchain4j-anthropic"); + return AnthropicChatModel.builder() + .apiKey(config.getString("api-key")) + .modelName( + config.hasPath("model-name") + ? config.getString("model-name") + : "claude-3-5-sonnet-latest") + .timeout(getTimeout(config, Duration.ofSeconds(60))) + .temperature(getTemp(config)) + .build(); + } + + @Override + public StreamingChatModel createStreamingModel(@NonNull Config config) { + return AnthropicStreamingChatModel.builder() + .apiKey(config.getString("api-key")) + .modelName( + config.hasPath("model-name") + ? 
config.getString("model-name") + : "claude-3-5-sonnet-latest") + .timeout(getStreamTimeout(config)) + .temperature(getTemp(config)) + .build(); + } + }, + + OLLAMA { + @Override + public ChatModel createChatModel(@NonNull Config config) { + check("dev.langchain4j.model.ollama.OllamaChatModel", "langchain4j-ollama"); + return OllamaChatModel.builder() + .baseUrl(config.getString("base-url")) + .modelName(config.getString("model-name")) + .timeout(getTimeout(config, Duration.ofSeconds(60))) + .build(); + } + + @Override + public StreamingChatModel createStreamingModel(@NonNull Config config) { + return OllamaStreamingChatModel.builder() + .baseUrl(config.getString("base-url")) + .modelName(config.getString("model-name")) + .timeout(getStreamTimeout(config)) + .build(); + } + }, + + JLAMA { + @Override + public ChatModel createChatModel(@NonNull Config config) { + check("dev.langchain4j.model.jlama.JlamaChatModel", "langchain4j-jlama"); + return JlamaChatModel.builder() + .modelName(config.getString("model-name")) + .workingDirectory(getOrCreateWorkingDir(config)) + .build(); + } + + @Override + public StreamingChatModel createStreamingModel(@NonNull Config config) { + return JlamaStreamingChatModel.builder() + .modelName(config.getString("model-name")) + .workingDirectory(getOrCreateWorkingDir(config)) + .build(); + } + }; + + /** + * Resolves a built-in provider by name. + * + * @param name The provider name (e.g. "openai"). + * @return The corresponding enum constant. + * @throws IllegalArgumentException if provider is unknown. 
+ */ + public static BuiltInModel resolve(String name) { + try { + return valueOf(name.toUpperCase(java.util.Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Unsupported LangChain4j provider: " + name, e); + } + } + + // --- Helper Methods for Enum Implementation --- + + protected void check(String className, String artifact) { + try { + Class.forName(className); + } catch (ClassNotFoundException e) { + throw new IllegalStateException( + "Provider dependency missing. Add 'dev.langchain4j:" + artifact + "' to your project.", e); + } + } + + protected Duration getTimeout(Config config, Duration defaultValue) { + return config.hasPath("timeout") ? config.getDuration("timeout") : defaultValue; + } + + protected Duration getStreamTimeout(Config config) { + return config.hasPath("streaming-timeout") + ? config.getDuration("streaming-timeout") + : Duration.ofSeconds(10); + } + + protected double getTemp(Config config) { + return config.hasPath("temperature") ? config.getDouble("temperature") : 0.7; + } + + protected java.nio.file.Path getOrCreateWorkingDir(Config config) { + java.nio.file.Path path = + config.hasPath("working-dir") + ? 
java.nio.file.Paths.get(config.getString("working-dir")) + : java.nio.file.Paths.get(System.getProperty("user.dir"), "models"); + + try { + // Jlama explicitly requires the directory to exist before booting + if (!java.nio.file.Files.exists(path)) { + java.nio.file.Files.createDirectories(path); + } + return path; + } catch (java.io.IOException e) { + throw new IllegalStateException("Failed to create a working directory at: " + path, e); + } + } +} diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/FallbackChatModel.java b/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/FallbackChatModel.java new file mode 100644 index 0000000000..b036b5e280 --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/FallbackChatModel.java @@ -0,0 +1,40 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.internal.langchain4j; + +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.response.ChatResponse; +import io.jooby.langchain4j.FailoverListener; + +/** + * Decorator for {@link ChatModel} that provides failover logic. Catching exceptions from the + * primary model and routing to a fallback instance. 
+ */ +public class FallbackChatModel implements ChatModel { + private final String name; + private final ChatModel primary; + private final ChatModel fallback; + private final FailoverListener listener; + + public FallbackChatModel( + String name, ChatModel primary, ChatModel fallback, FailoverListener listener) { + this.name = name; + this.primary = primary; + this.fallback = fallback; + this.listener = listener; + } + + @Override + public ChatResponse chat(ChatRequest request) { + try { + return primary.chat(request); + } catch (Exception e) { + listener.onFailover(name, e); + return fallback.chat(request); + } + } +} diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/FallbackStreamingChatModel.java b/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/FallbackStreamingChatModel.java new file mode 100644 index 0000000000..aae7191f6b --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/internal/langchain4j/FallbackStreamingChatModel.java @@ -0,0 +1,64 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.internal.langchain4j; + +import dev.langchain4j.model.chat.StreamingChatModel; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.response.StreamingChatResponseHandler; +import io.jooby.langchain4j.FailoverListener; + +/** + * Decorator for {@link StreamingChatModel} handling asynchronous failover. Fallback triggers only + * if the error occurs before the first token is received. 
+ */ +public class FallbackStreamingChatModel implements StreamingChatModel { + private final String name; + private final StreamingChatModel primary; + private final StreamingChatModel fallback; + private final FailoverListener fallbackListener; + + public FallbackStreamingChatModel( + String name, + StreamingChatModel primary, + StreamingChatModel fallback, + FailoverListener fallbackListener) { + this.name = name; + this.primary = primary; + this.fallback = fallback; + this.fallbackListener = fallbackListener; + } + + @Override + public void chat(ChatRequest request, StreamingChatResponseHandler handler) { + primary.chat( + request, + new StreamingChatResponseHandler() { + /* volatile: streaming clients may invoke callbacks on different threads */ + private volatile boolean started = false; + + @Override + public void onPartialResponse(String token) { + started = true; + handler.onPartialResponse(token); + } + + @Override + public void onCompleteResponse( + dev.langchain4j.model.chat.response.ChatResponse response) { + handler.onCompleteResponse(response); + } + + @Override + public void onError(Throwable error) { + if (!started) { + fallbackListener.onFailover(name, error); + fallback.chat(request, handler); + } else { + handler.onError(error); + } + } + }); + } +} diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/ChatModelFactory.java b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/ChatModelFactory.java new file mode 100644 index 0000000000..ed9cffad65 --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/ChatModelFactory.java @@ -0,0 +1,41 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.langchain4j; + +import com.typesafe.config.Config; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.StreamingChatModel; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * Factory contract for creating 
LangChain4j chat models from Jooby configuration. Implementations + * map {@link Config} keys to specific model builder methods. + * + * @author edgar + * @since 1.0.0 + */ +public interface ChatModelFactory { + + /** + * Creates a blocking chat model. + * + * @param config The configuration block for this model. + * @return A non-null instance of a {@link ChatModel}. + */ + ChatModel createChatModel(@NonNull Config config); + + /** + * Creates a streaming chat model. Returns {@code null} if the provider does not support + * streaming. + * + * @param config The configuration block for this model. + * @return A {@link StreamingChatModel} or {@code null}. + */ + @Nullable default StreamingChatModel createStreamingModel(@NonNull Config config) { + return null; + } +} diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/FailoverListener.java b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/FailoverListener.java new file mode 100644 index 0000000000..aab388f2e8 --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/FailoverListener.java @@ -0,0 +1,18 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.langchain4j; + +/** Listener for failover events in a model chain. */ +@FunctionalInterface +public interface FailoverListener { + /** + * Called when a primary model fails and the system switches to a fallback. + * + * @param modelName The name of the model that failed. + * @param error The exception that triggered the fallback. 
+ */ + void onFailover(String modelName, Throwable error); +} diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/LangChain4jModule.java b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/LangChain4jModule.java new file mode 100644 index 0000000000..c72d869f20 --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/LangChain4jModule.java @@ -0,0 +1,214 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.langchain4j; + +import java.util.*; + +import org.slf4j.LoggerFactory; + +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.StreamingChatModel; +import io.jooby.Extension; +import io.jooby.Jooby; +import io.jooby.ServiceKey; +import io.jooby.internal.langchain4j.BuiltInModel; +import io.jooby.internal.langchain4j.FallbackChatModel; +import io.jooby.internal.langchain4j.FallbackStreamingChatModel; + +/** + * Jooby Extension for LangChain4j. + * + *

This module provides seamless integration between Jooby and the LangChain4j 1.x ecosystem, + * automatically instantiating and registering {@link dev.langchain4j.model.chat.ChatModel} and + * {@link dev.langchain4j.model.chat.StreamingChatModel} components based on your application + * configuration. + * + *

Installation

+ * + *

Install the module inside your Jooby application: + * + *

{@code
+ * {
+ *   install(new LangChain4jModule());
+ *   get("/chat", ctx -> {
+ *     ChatModel ai = require(ChatModel.class);
+ *     return ai.chat("Hello world!");
+ *   });
+ *   }
+ * }
+ * + *

Configuration

+ * + *

Models are defined in your {@code application.conf} under the {@code langchain4j.models} key. + * The module automatically creates both blocking and streaming interfaces if the provider supports + * them. + * + *

{@code
+ * langchain4j {
+ *   models {
+ *   # The name of the registered service will be "gpt-assistant"
+ *     gpt-assistant {
+ *       provider = "openai"
+ *       api-key = ${OPENAI_API_KEY}
+ *       model-name = "gpt-4o-mini"
+ *       timeout = 30s
+ *       temperature = 0.7
+ *    }
+ *   }
+ * }
+ * }
+ * + *

Resilience & Fallback Routing

+ * + *

You can define a chain of fallbacks to ensure high availability. If the primary model fails + * (e.g., due to rate limits or network timeouts), the module automatically and silently routes the + * request to the next configured fallback. + * + *

{@code
+ * langchain4j.models {
+ *   primary-agent {
+ *     provider = "openai"
+ *     api-key = "..."
+ *     fallback = ["local-failover"]
+ *   }
+ *   local-failover {
+ *     provider = "jlama"
+ *     model-name = "tjake/Llama-3.2-1B-Instruct-JQ4"
+ *   }
+ * }
+ * }
+ * + *

To track when these failovers occur, attach a listener during module installation: + * + *

{@code
+ * install(new LangChain4jModule()
+ *   .failoverListener((modelName, error) -> {
+ *     log.warn("Model {} failed, switching to fallback. Reason: {}", modelName, error.getMessage());
+ *    })
+ * );
+ * }
+ * + *

Custom Providers

+ * + *

The extension includes built-in support for popular providers like OpenAI, Anthropic, Ollama, + * and Jlama. To add support for an unlisted provider (e.g., Google Vertex AI), register a custom + * {@link io.jooby.langchain4j.ChatModelFactory}: + * + *

{@code
+ * install(new LangChain4jModule()
+ *   .register("vertex", new ChatModelFactory() {
+ *     @Override
+ *     public ChatModel createChatModel(Config config) {
+ *       return VertexAiGeminiChatModel.builder()
+ *          .project(config.getString("project"))
+ *          .location(config.getString("location"))
+ *          .build();
+ *     }
+ *   }));
+ * }
+ * + *

Dependency Management

+ * + *

To keep your application lightweight, the heavy provider SDKs (like {@code + * langchain4j-open-ai} or {@code langchain4j-jlama}) are marked as optional. You + * must explicitly add the dependencies for the specific providers you intend to use to your + * project's {@code pom.xml} or {@code build.gradle}. + * + * @author edgar + * @since 4.1.0 + */ +public class LangChain4jModule implements Extension { + private FailoverListener failoverListener; + private final Map factories = new HashMap<>(); + + /** + * Registers a custom provider factory. + * + * @param provider The provider name (e.g., "vertex"). + * @param factory The factory implementation. + * @return This module. + */ + public LangChain4jModule register(String provider, ChatModelFactory factory) { + factories.put(provider.toLowerCase(), factory); + return this; + } + + /** + * Listener for failover events in a model chain. Called when a primary model fails and the system + * switches to a fallback. + * + * @param failoverListener Failover listener. + * @return This module. + */ + public LangChain4jModule failoverListener(FailoverListener failoverListener) { + this.failoverListener = failoverListener; + return this; + } + + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public void install(Jooby app) { + var config = app.getConfig(); + + if (!config.hasPath("langchain4j.models")) { + throw new IllegalStateException( + "LangChain4j module installed, but no models found in configuration. Please define at" + + " least one model under the 'langchain4j.models' key in your application.conf"); + } + + var modelsConfig = config.getConfig("langchain4j.models"); + Map blockingMap = new HashMap<>(); + Map streamingMap = new HashMap<>(); + + if (failoverListener == null) { + failoverListener = + (name, error) -> { + LoggerFactory.getLogger(getClass()) + .error("execution of '{}' resulted in exception", name, error); + }; + } + + // 1. 
Creation + for (var name : new TreeSet<>(modelsConfig.root().keySet())) { + var mConf = modelsConfig.getConfig(name); + var provider = mConf.getString("provider").toLowerCase(java.util.Locale.ROOT); + var f = + factories.containsKey(provider) + ? factories.get(provider) + : BuiltInModel.resolve(provider); + + blockingMap.put(name, f.createChatModel(mConf)); + streamingMap.put(name, f.createStreamingModel(mConf)); + } + + var services = app.getServices(); + // 2. Chaining & Registration (sorted iteration keeps the default-service choice deterministic) + for (var name : new TreeSet<>(modelsConfig.root().keySet())) { + var mConf = modelsConfig.getConfig(name); + var b = blockingMap.get(name); + var s = streamingMap.get(name); + + if (mConf.hasPath("fallback")) { + for (var fbName : mConf.getStringList("fallback")) { + var fb = + Objects.requireNonNull( + blockingMap.get(fbName), "Unknown fallback model: " + fbName); + b = new FallbackChatModel(name, b, fb, failoverListener); + var sfb = streamingMap.get(fbName); + if (s != null && sfb != null) + s = new FallbackStreamingChatModel(name, s, sfb, failoverListener); + } + } + + services.put(ServiceKey.key(ChatModel.class, name), b); + if (s != null) services.put(ServiceKey.key(StreamingChatModel.class, name), s); + + // Set defaults + services.putIfAbsent(ChatModel.class, b); + services.putIfAbsent((Class) b.getClass(), b); + if (s != null) { + services.putIfAbsent(StreamingChatModel.class, s); + services.putIfAbsent((Class) s.getClass(), s); + } + } + } +} diff --git a/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/package-info.java b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/package-info.java new file mode 100644 index 0000000000..48d5c35797 --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/io/jooby/langchain4j/package-info.java @@ -0,0 +1,104 @@ +/** + * Jooby Extension for LangChain4j. + * + *

This module provides seamless integration between Jooby and the LangChain4j 1.x ecosystem, + * automatically instantiating and registering {@link dev.langchain4j.model.chat.ChatModel} and + * {@link dev.langchain4j.model.chat.StreamingChatModel} components based on your application + * configuration. + * + *

Installation

+ * + *

Install the module inside your Jooby application: + * + *

{@code
+ * {
+ *   install(new LangChain4jModule());
+ *   get("/chat", ctx -> {
+ *     ChatModel ai = require(ChatModel.class);
+ *     return ai.chat("Hello world!");
+ *   });
+ *   }
+ * }
+ * + *

Configuration

+ * + *

Models are defined in your {@code application.conf} under the {@code langchain4j.models} key. + * The module automatically creates both blocking and streaming interfaces if the provider supports + * them. + * + *

{@code
+ * langchain4j {
+ *   models {
+ *   # The name of the registered service will be "gpt-assistant"
+ *     gpt-assistant {
+ *       provider = "openai"
+ *       api-key = ${OPENAI_API_KEY}
+ *       model-name = "gpt-4o-mini"
+ *       timeout = 30s
+ *       temperature = 0.7
+ *    }
+ *   }
+ * }
+ * }
+ * + *

Resilience & Fallback Routing

+ * + *

You can define a chain of fallbacks to ensure high availability. If the primary model fails + * (e.g., due to rate limits or network timeouts), the module automatically and silently routes the + * request to the next configured fallback. + * + *

{@code
+ * langchain4j.models {
+ *   primary-agent {
+ *     provider = "openai"
+ *     api-key = "..."
+ *     fallback = ["local-failover"]
+ *   }
+ *   local-failover {
+ *     provider = "jlama"
+ *     model-name = "tjake/Llama-3.2-1B-Instruct-JQ4"
+ *   }
+ * }
+ * }
+ * + *

To track when these failovers occur, attach a listener during module installation: + * + *

{@code
+ * install(new LangChain4jModule()
+ *   .failoverListener((modelName, error) -> {
+ *     log.warn("Model {} failed, switching to fallback. Reason: {}", modelName, error.getMessage());
+ *    })
+ * );
+ * }
+ * + *

Custom Providers

+ * + *

The extension includes built-in support for popular providers like OpenAI, Anthropic, Ollama, + * and Jlama. To add support for an unlisted provider (e.g., Google Vertex AI), register a custom + * {@link io.jooby.langchain4j.ChatModelFactory}: + * + *

{@code
+ * install(new LangChain4jModule()
+ *   .register("vertex", new ChatModelFactory() {
+ *     @Override
+ *     public ChatModel createChatModel(Config config) {
+ *       return VertexAiGeminiChatModel.builder()
+ *          .project(config.getString("project"))
+ *          .location(config.getString("location"))
+ *          .build();
+ *     }
+ *   }));
+ * }
+ * + *

Dependency Management

+ * + *

To keep your application lightweight, the heavy provider SDKs (like {@code + * langchain4j-open-ai} or {@code langchain4j-jlama}) are marked as optional. You + * must explicitly add the dependencies for the specific providers you intend to use to your + * project's {@code pom.xml} or {@code build.gradle}. + * + * @author edgar + * @since 4.1.0 + */ +@edu.umd.cs.findbugs.annotations.ReturnValuesAreNonnullByDefault +package io.jooby.langchain4j; diff --git a/modules/jooby-langchain4j/src/main/java/module-info.java b/modules/jooby-langchain4j/src/main/java/module-info.java new file mode 100644 index 0000000000..96e341145e --- /dev/null +++ b/modules/jooby-langchain4j/src/main/java/module-info.java @@ -0,0 +1,15 @@ +module io.jooby.langchain4j { + exports io.jooby.langchain4j; + + requires io.jooby; + requires static com.github.spotbugs.annotations; + requires org.slf4j; + requires typesafe.config; + requires langchain4j.core; + + // Optional provider modules + requires static langchain4j.open.ai; + requires static langchain4j.anthropic; + requires static langchain4j.ollama; + requires static langchain4j.jlama; +} diff --git a/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/BuiltInModelTest.java b/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/BuiltInModelTest.java new file mode 100644 index 0000000000..5b42ae28d9 --- /dev/null +++ b/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/BuiltInModelTest.java @@ -0,0 +1,68 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.internal.langchain4j; + +import static org.junit.jupiter.api.Assertions.*; + +import java.util.Map; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +class BuiltInModelTest { + + @Test + @DisplayName("Should resolve enum ignoring 
case") + void resolveEnum() { + assertEquals(BuiltInModel.OPENAI, BuiltInModel.resolve("openai")); + assertEquals(BuiltInModel.ANTHROPIC, BuiltInModel.resolve("Anthropic")); + assertEquals(BuiltInModel.OLLAMA, BuiltInModel.resolve("OLLAMA")); + assertEquals(BuiltInModel.JLAMA, BuiltInModel.resolve("jLama")); + } + + @Test + @DisplayName("Should throw exception for unknown provider") + void resolveUnknown() { + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> { + BuiltInModel.resolve("vertex"); + }); + assertTrue(exception.getMessage().contains("Unsupported LangChain4j provider")); + } + + @Test + @DisplayName("Should parse timeout and temp values using internal helpers") + void testConfigurationHelpers() { + Config config = ConfigFactory.parseMap(Map.of("timeout", "15s", "temperature", 0.5)); + + // Access helper methods via an enum instance + BuiltInModel tester = BuiltInModel.OPENAI; + + assertEquals(15, tester.getTimeout(config, null).getSeconds()); + assertEquals(0.5, tester.getTemp(config)); + } + + @Test + @DisplayName("Missing dependency check should throw IllegalStateException") + void missingDependencyCheck() { + BuiltInModel tester = BuiltInModel.OPENAI; + + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> { + // Intentionally passing a garbage class name to force the ClassNotFoundException + tester.check("dev.langchain4j.fake.MissingClass", "langchain4j-fake"); + }); + + assertTrue(exception.getMessage().contains("Provider dependency missing")); + } +} diff --git a/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/FallbackChatModelTest.java b/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/FallbackChatModelTest.java new file mode 100644 index 0000000000..8f2fb27a4c --- /dev/null +++ b/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/FallbackChatModelTest.java @@ -0,0 +1,88 @@ +/* + * Jooby https://jooby.io + 
* Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.internal.langchain4j; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.*; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.response.ChatResponse; +import io.jooby.langchain4j.FailoverListener; + +class FallbackChatModelTest { + + private ChatRequest createValidRequest() { + return ChatRequest.builder().messages(UserMessage.from("Hello")).build(); + } + + @Test + @DisplayName("Should return primary response when primary succeeds") + void primarySucceeds() { + ChatModel primary = mock(ChatModel.class); + ChatModel fallback = mock(ChatModel.class); + FailoverListener listener = mock(FailoverListener.class); + ChatRequest request = createValidRequest(); + + // Mock the response to bypass builder validation + ChatResponse response = mock(ChatResponse.class); + + when(primary.chat(request)).thenReturn(response); + + FallbackChatModel model = new FallbackChatModel("test-model", primary, fallback, listener); + ChatResponse result = model.chat(request); + + assertEquals(response, result); + verify(primary).chat(request); + verifyNoInteractions(fallback); + verifyNoInteractions(listener); + } + + @Test + @DisplayName("Should trigger listener and use fallback when primary fails") + void primaryFailsFallbackSucceeds() { + ChatModel primary = mock(ChatModel.class); + ChatModel fallback = mock(ChatModel.class); + FailoverListener listener = mock(FailoverListener.class); + ChatRequest request = createValidRequest(); + + // Mock the response to bypass builder validation + ChatResponse fallbackResponse = mock(ChatResponse.class); + 
RuntimeException apiError = new RuntimeException("API Rate Limit"); + + when(primary.chat(request)).thenThrow(apiError); + when(fallback.chat(request)).thenReturn(fallbackResponse); + + FallbackChatModel model = new FallbackChatModel("test-model", primary, fallback, listener); + ChatResponse result = model.chat(request); + + assertEquals(fallbackResponse, result); + verify(listener).onFailover("test-model", apiError); + verify(fallback).chat(request); + } + + @Test + @DisplayName("Should throw exception if both primary and fallback fail") + void bothFail() { + ChatModel primary = mock(ChatModel.class); + ChatModel fallback = mock(ChatModel.class); + FailoverListener listener = mock(FailoverListener.class); + ChatRequest request = createValidRequest(); + + when(primary.chat(request)).thenThrow(new RuntimeException("Primary Down")); + when(fallback.chat(request)).thenThrow(new RuntimeException("Fallback Down")); + + FallbackChatModel model = new FallbackChatModel("test-model", primary, fallback, listener); + + assertThrows(RuntimeException.class, () -> model.chat(request)); + verify(listener).onFailover(eq("test-model"), any(RuntimeException.class)); + } +} diff --git a/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/FallbackStreamingChatModelTest.java b/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/FallbackStreamingChatModelTest.java new file mode 100644 index 0000000000..efa7647aeb --- /dev/null +++ b/modules/jooby-langchain4j/src/test/java/io/jooby/internal/langchain4j/FallbackStreamingChatModelTest.java @@ -0,0 +1,112 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.internal.langchain4j; + +import static org.mockito.Mockito.*; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.StreamingChatModel; +import 
dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.chat.response.StreamingChatResponseHandler; +import io.jooby.langchain4j.FailoverListener; + +class FallbackStreamingChatModelTest { + + private ChatRequest createValidRequest() { + return ChatRequest.builder().messages(UserMessage.from("Hello")).build(); + } + + @Test + @DisplayName("Should trigger fallback if primary fails before first token") + void failsBeforeTokens() { + StreamingChatModel primary = mock(StreamingChatModel.class); + StreamingChatModel fallback = mock(StreamingChatModel.class); + FailoverListener listener = mock(FailoverListener.class); + StreamingChatResponseHandler originalHandler = mock(StreamingChatResponseHandler.class); + ChatRequest request = createValidRequest(); + + doAnswer( + invocation -> { + StreamingChatResponseHandler internalHandler = invocation.getArgument(1); + internalHandler.onError(new RuntimeException("Connection Timeout")); + return null; + }) + .when(primary) + .chat(eq(request), any()); + + FallbackStreamingChatModel model = + new FallbackStreamingChatModel("stream-ai", primary, fallback, listener); + model.chat(request, originalHandler); + + verify(listener).onFailover(eq("stream-ai"), any(RuntimeException.class)); + verify(fallback).chat(eq(request), eq(originalHandler)); + verify(originalHandler, never()).onError(any()); + } + + @Test + @DisplayName("Should NOT trigger fallback if primary fails mid-stream") + void failsMidStream() { + StreamingChatModel primary = mock(StreamingChatModel.class); + StreamingChatModel fallback = mock(StreamingChatModel.class); + FailoverListener listener = mock(FailoverListener.class); + StreamingChatResponseHandler originalHandler = mock(StreamingChatResponseHandler.class); + ChatRequest request = createValidRequest(); + + doAnswer( + invocation -> { + StreamingChatResponseHandler internalHandler = invocation.getArgument(1); + 
internalHandler.onPartialResponse("Hello"); + internalHandler.onError(new RuntimeException("Connection Dropped")); + return null; + }) + .when(primary) + .chat(eq(request), any()); + + FallbackStreamingChatModel model = + new FallbackStreamingChatModel("stream-ai", primary, fallback, listener); + model.chat(request, originalHandler); + + verify(originalHandler).onPartialResponse("Hello"); + verifyNoInteractions(listener); + verifyNoInteractions(fallback); + verify(originalHandler).onError(any(RuntimeException.class)); + } + + @Test + @DisplayName("Should pass successful completion down to original handler") + void successfulCompletion() { + StreamingChatModel primary = mock(StreamingChatModel.class); + StreamingChatModel fallback = mock(StreamingChatModel.class); + FailoverListener listener = mock(FailoverListener.class); + StreamingChatResponseHandler originalHandler = mock(StreamingChatResponseHandler.class); + ChatRequest request = createValidRequest(); + + // Mock the response to bypass builder validation + ChatResponse completeResponse = mock(ChatResponse.class); + + doAnswer( + invocation -> { + StreamingChatResponseHandler internalHandler = invocation.getArgument(1); + internalHandler.onPartialResponse("Done"); + internalHandler.onCompleteResponse(completeResponse); + return null; + }) + .when(primary) + .chat(eq(request), any()); + + FallbackStreamingChatModel model = + new FallbackStreamingChatModel("stream-ai", primary, fallback, listener); + model.chat(request, originalHandler); + + verify(originalHandler).onPartialResponse("Done"); + verify(originalHandler).onCompleteResponse(completeResponse); + verifyNoInteractions(fallback); + } +} diff --git a/modules/jooby-langchain4j/src/test/java/io/jooby/langchain4j/LangChain4jModuleTest.java b/modules/jooby-langchain4j/src/test/java/io/jooby/langchain4j/LangChain4jModuleTest.java new file mode 100644 index 0000000000..16cb1120ca --- /dev/null +++ 
b/modules/jooby-langchain4j/src/test/java/io/jooby/langchain4j/LangChain4jModuleTest.java @@ -0,0 +1,111 @@ +/* + * Jooby https://jooby.io + * Apache License Version 2.0 https://jooby.io/LICENSE.txt + * Copyright 2014 Edgar Espina + */ +package io.jooby.langchain4j; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import java.util.Map; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.StreamingChatModel; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.jooby.Environment; +import io.jooby.Jooby; +import io.jooby.ServiceKey; +import io.jooby.ServiceRegistry; +import io.jooby.internal.langchain4j.FallbackChatModel; + +class LangChain4jModuleTest { + + @Test + @DisplayName("Should parse config and register custom factory models") + void customFactoryRegistration() { + var app = + createApp( + ConfigFactory.parseMap( + Map.of( + "langchain4j.models.my-agent.provider", "custom", + "langchain4j.models.my-agent.api-key", "secret"))); + + ChatModelFactory mockFactory = mock(ChatModelFactory.class); + ChatModel mockChatModel = mock(ChatModel.class); + StreamingChatModel mockStreamModel = mock(StreamingChatModel.class); + + when(mockFactory.createChatModel(any())).thenReturn(mockChatModel); + when(mockFactory.createStreamingModel(any())).thenReturn(mockStreamModel); + + LangChain4jModule module = new LangChain4jModule().register("custom", mockFactory); + + module.install(app); + + ServiceRegistry services = app.getServices(); + + // Check specific named registration + assertEquals(mockChatModel, services.get(ChatModel.class)); + assertEquals(mockStreamModel, services.get(StreamingChatModel.class)); + + // Check default (unnamed) registration since it's the first/only model + assertEquals(mockChatModel, 
services.get(ChatModel.class)); + assertEquals(mockStreamModel, services.get(StreamingChatModel.class)); + } + + @NonNull private static Jooby createApp(Config config) { + var app = new Jooby(); + var environment = mock(Environment.class); + when(environment.getConfig()).thenReturn(config); + app.setEnvironment(environment); + return app; + } + + @Test + @DisplayName("Should wrap models in Fallback decorators when configured") + void fallbackChainingRegistration() { + Jooby app = + createApp( + ConfigFactory.parseMap( + Map.of( + "langchain4j.models.primary.provider", "custom", + "langchain4j.models.primary.fallback", java.util.List.of("secondary"), + "langchain4j.models.secondary.provider", "custom"))); + + ChatModelFactory mockFactory = mock(ChatModelFactory.class); + when(mockFactory.createChatModel(any())).thenReturn(mock(ChatModel.class)); + + LangChain4jModule module = new LangChain4jModule().register("custom", mockFactory); + module.install(app); + + ServiceRegistry services = app.getServices(); + ChatModel primaryModel = services.get(ServiceKey.key(ChatModel.class, "primary")); + ChatModel secondaryModel = services.get(ServiceKey.key(ChatModel.class, "secondary")); + + // The secondary should be standard, but the primary should be wrapped in the decorator + assertFalse(secondaryModel instanceof FallbackChatModel); + assertInstanceOf(FallbackChatModel.class, primaryModel); + } + + @Test + @DisplayName("Should fail fast if langchain4j.models configuration is missing") + void missingConfigurationFailsFast() { + Jooby app = createApp(ConfigFactory.empty()); + + LangChain4jModule module = new LangChain4jModule(); + + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> { + module.install(app); + }); + + assertTrue(exception.getMessage().contains("no models found in configuration")); + } +} diff --git a/modules/pom.xml b/modules/pom.xml index f7c20aa517..ffc309e462 100644 --- a/modules/pom.xml +++ b/modules/pom.xml @@ -32,7 
+32,9 @@ jooby-swagger-ui jooby-redoc - + jooby-langchain4j + + jooby-trpc diff --git a/pom.xml b/pom.xml index c577729768..6a48310b0d 100644 --- a/pom.xml +++ b/pom.xml @@ -337,6 +337,12 @@ ${jooby.version} + + io.jooby + jooby-langchain4j + ${jooby.version} + + io.jooby jooby-openapi