Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 59 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,65 @@ var request =
var response = openAIClient.chat().completions().create(request);
```

### LangChain4j Instrumentation

```java
var braintrust = Braintrust.get();
var openTelemetry = braintrust.openTelemetryCreate();

// Wrap the chat model to trace LLM calls
ChatModel model = BraintrustLangchain.wrap(
openTelemetry,
OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.temperature(0.0));

var response = model.chat("What is the capital of France?");
```

#### Tool Wrapping

Use `BraintrustLangchain.wrapTools()` to automatically trace tool executions in your LangChain4j agents:

```java
// Create your tool class
public class WeatherTools {
@Tool("Get current weather for a location")
public String getWeather(String location) {
return "The weather in " + location + " is sunny.";
}
}

// Wrap tools to create spans for each tool execution
WeatherTools tools = new WeatherTools();
WeatherTools instrumentedTools = BraintrustLangchain.wrapTools(openTelemetry, tools);

// Use instrumented tools in your AI service
Assistant assistant = AiServices.builder(Assistant.class)
.chatModel(model)
.tools(instrumentedTools)
.build();
```

Each tool call will automatically create an OpenTelemetry span in Braintrust with:
- Tool name and parameters
- Execution duration
- Return values
- Any exceptions thrown

**Note:** For proper display in the Braintrust UI, ensure parent spans (conversation, turn, etc.) also set the required Braintrust attributes:
```java
var span = tracer.spanBuilder("my-span").startSpan();
span.setAttribute("braintrust.span_attributes", "{\"type\":\"task\",\"name\":\"my-span\"}");
span.setAttribute("braintrust.input_json", "{\"user_message\":\"...\"}");
// ... do work ...
span.setAttribute("braintrust.output_json", "{\"result\":\"...\"}");
span.end();
```

See [LangchainToolWrappingExample.java](./examples/src/main/java/dev/braintrust/examples/LangchainToolWrappingExample.java) for a complete example with proper span hierarchy.

## Running Examples

Example source code can be found [here](./examples/src/main/java/dev/braintrust/examples).
Expand Down
18 changes: 16 additions & 2 deletions examples/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -159,11 +159,25 @@ task runRemoteEval(type: JavaExec) {
}
}

// Runs the simple LangChain4j chat-model instrumentation example.
// NOTE: the scraped diff left both the old task header (runLangchain /
// LangchainExample) and the new one in place; this is the post-change task.
task runLangchainSimple(type: JavaExec) {
    group = 'Braintrust SDK Examples'
    description = 'Run the LangChain4j instrumentation example. NOTE: this requires OPENAI_API_KEY to be exported and will make a small call to openai, using your tokens'
    classpath = sourceSets.main.runtimeClasspath
    mainClass = 'dev.braintrust.examples.LangchainSimpleExample'
    // Route Braintrust SDK logging through slf4j-simple at the configured level.
    systemProperty 'org.slf4j.simpleLogger.log.dev.braintrust', braintrustLogLevel
    // Remote-debug settings: JDWP on port 5566, non-suspending.
    debugOptions {
        enabled = true
        port = 5566
        server = true
        suspend = false
    }
}

task runLangchainAIServices(type: JavaExec) {
group = 'Braintrust SDK Examples'
description = 'Run the LangChain4j AI Services example. NOTE: this requires OPENAI_API_KEY to be exported and will make a small call to openai, using your tokens'
classpath = sourceSets.main.runtimeClasspath
mainClass = 'dev.braintrust.examples.LangchainAIServicesExample'
systemProperty 'org.slf4j.simpleLogger.log.dev.braintrust', braintrustLogLevel
debugOptions {
enabled = true
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
package dev.braintrust.examples;

import dev.braintrust.Braintrust;
import dev.braintrust.instrumentation.langchain.BraintrustLangchain;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;

/**
 * Example: instrumenting a LangChain4j {@code AiServices} pipeline with Braintrust tracing.
 *
 * <p>Wraps the whole service so LLM calls, tool executions, and the service method itself are
 * traced, then issues two chats inside a manually-created root span and prints a Braintrust URL
 * for the trace.
 */
public class LangchainAIServicesExample {

    public static void main(String[] args) throws Exception {
        // Fail fast with a clear message instead of an opaque API error later.
        // (Matches the guard used by LangchainSimpleExample.)
        if (null == System.getenv("OPENAI_API_KEY")) {
            System.err.println("OPENAI_API_KEY must be exported to run this example");
            System.exit(1);
        }

        var braintrust = Braintrust.get();
        var openTelemetry = braintrust.openTelemetryCreate();

        // Wrap the AiServices builder BEFORE it is built: the wrapper instruments the chat
        // model, the tools, and the built service proxy.
        Assistant assistant =
                BraintrustLangchain.wrap(
                        openTelemetry,
                        AiServices.builder(Assistant.class)
                                .chatModel(
                                        OpenAiChatModel.builder()
                                                .apiKey(System.getenv("OPENAI_API_KEY"))
                                                .modelName("gpt-4o-mini")
                                                .temperature(0.0)
                                                .build())
                                .tools(new WeatherTools())
                                .executeToolsConcurrently());

        // Root span so both chat turns share one trace in the Braintrust UI.
        var rootSpan =
                openTelemetry
                        .getTracer("my-instrumentation")
                        .spanBuilder("langchain4j-ai-services-example")
                        .startSpan();
        try (var ignored = rootSpan.makeCurrent()) {
            // response 1 should do a concurrent tool call
            var response1 = assistant.chat("is it hotter in Paris or New York right now?");
            System.out.println("response1: " + response1);
            var response2 = assistant.chat("what's the five day forecast for San Francisco?");
            System.out.println("response2: " + response2);
        } finally {
            rootSpan.end();
        }
        // Note: .formatted binds to the string literal before the '+', which is intended here.
        var url =
                braintrust.projectUri()
                        + "/logs?r=%s&s=%s"
                                .formatted(
                                        rootSpan.getSpanContext().getTraceId(),
                                        rootSpan.getSpanContext().getSpanId());
        System.out.println(
                "\n\n Example complete! View your data in Braintrust: %s\n".formatted(url));
    }

    /** AI Service interface for the assistant */
    interface Assistant {
        String chat(String userMessage);
    }

    /** Example tool class with weather-related methods; returns canned data only. */
    public static class WeatherTools {
        @Tool("Get current weather for a location")
        public String getWeather(String location) {
            return String.format("The weather in %s is sunny with 72°F temperature.", location);
        }

        @Tool("Get weather forecast for next N days")
        public String getForecast(String location, int days) {
            return String.format(
                    "The %d-day forecast for %s: Mostly sunny with temperatures between 65-75°F.",
                    days, location);
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import dev.langchain4j.model.openai.OpenAiChatModel;

/** Basic OTel + LangChain4j instrumentation example */
public class LangchainExample {
public class LangchainSimpleExample {

public static void main(String[] args) throws Exception {
if (null == System.getenv("OPENAI_API_KEY")) {
Expand Down Expand Up @@ -46,8 +46,7 @@ public static void main(String[] args) throws Exception {
}

/** Sends a single chat message through the (instrumented) model and prints the reply. */
private static void chatExample(ChatModel model) {
    // The scraped diff left the removed two-line form alongside the new one-liner,
    // declaring `response` twice; this is the post-change method.
    var response = model.chat(UserMessage.from("What is the capital of France?"));
    System.out.println(
            "\n~~~ LANGCHAIN4J CHAT RESPONSE: %s\n".formatted(response.aiMessage().text()));
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,66 +1,178 @@
package dev.braintrust.instrumentation.langchain;

import dev.langchain4j.http.client.HttpClientBuilder;
import dev.langchain4j.http.client.HttpClientBuilderLoader;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.service.AiServiceContext;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.tool.ToolExecutor;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.Tracer;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;

/** Braintrust LangChain4j client instrumentation. */
@Slf4j
public final class BraintrustLangchain {

private static final String INSTRUMENTATION_NAME = "braintrust-langchain4j";

/**
 * Instruments a LangChain4j {@link AiServices} pipeline with Braintrust tracing and builds it:
 * the underlying OpenAI (streaming) chat model is wrapped so each LLM call produces a span,
 * every registered tool executor is wrapped so each tool call produces a span, and the built
 * service is returned behind a tracing proxy that opens a span per service-method invocation.
 * Best-effort: on any failure the service is built uninstrumented.
 *
 * @param openTelemetry used to obtain the tracer for all generated spans
 * @param aiServices the not-yet-built AI services builder to instrument
 * @return the built service, proxied for tracing when instrumentation succeeded
 */
@SuppressWarnings("unchecked")
public static <T> T wrap(OpenTelemetry openTelemetry, AiServices<T> aiServices) {
    try {
        // Reach into the builder's private state — depends on LangChain4j internals.
        AiServiceContext context = getPrivateField(aiServices, "context");
        Tracer tracer = openTelemetry.getTracer(INSTRUMENTATION_NAME);

        // ////// CREATE A LLM SPAN FOR EACH CALL TO AI PROVIDER
        var chatModel = context.chatModel;
        var streamingChatModel = context.streamingChatModel;
        if (chatModel != null) {
            if (chatModel instanceof OpenAiChatModel oaiModel) {
                aiServices.chatModel(wrap(openTelemetry, oaiModel));
            } else {
                // Non-OpenAI models are left untouched; only LLM spans are lost.
                log.warn(
                        "unsupported model: {}. LLM calls will not be instrumented",
                        chatModel.getClass().getName());
            }
            // intentional fall-through: tool instrumentation below still applies
        } else if (streamingChatModel != null) {
            if (streamingChatModel instanceof OpenAiStreamingChatModel oaiModel) {
                aiServices.streamingChatModel(wrap(openTelemetry, oaiModel));
            } else {
                log.warn(
                        "unsupported model: {}. LLM calls will not be instrumented",
                        streamingChatModel.getClass().getName());
            }
            // intentional fall-through: tool instrumentation below still applies
        } else {
            // langchain is going to fail to build. don't apply instrumentation.
            throw new RuntimeException("model or chat model must be set");
        }

        if (context.toolService != null) {
            // ////// CREATE A SPAN FOR EACH TOOL CALL
            for (Map.Entry<String, ToolExecutor> entry :
                    context.toolService.toolExecutors().entrySet()) {
                String toolName = entry.getKey();
                ToolExecutor original = entry.getValue();
                // setValue mutates the live executor map in place.
                entry.setValue(new TracingToolExecutor(original, toolName, tracer));
            }

            // ////// LINK SPANS ACROSS CONCURRENT TOOL CALLS
            var underlyingExecutor = context.toolService.executor();
            if (underlyingExecutor != null) {
                // Presumably carries the OTel context onto worker threads so concurrent
                // tool spans parent correctly — per the wrapper's name; verify in its impl.
                aiServices.executeToolsConcurrently(
                        new OtelContextPassingExecutor(underlyingExecutor));
            }
        }

        // ////// CREATE A SPAN ON SERVICE METHOD INVOKE
        T service = aiServices.build();
        Class<T> serviceInterface = (Class<T>) context.aiServiceClass;
        return TracingProxy.create(serviceInterface, service, tracer);
    } catch (Exception e) {
        // Best-effort: never let instrumentation break the user's service.
        log.warn("failed to apply langchain AI services instrumentation", e);
        return aiServices.build();
    }
}

/**
 * Builds the given OpenAI chat-model builder and instruments the resulting model with
 * Braintrust traces.
 */
public static OpenAiChatModel wrap(
        OpenTelemetry otel, OpenAiChatModel.OpenAiChatModelBuilder builder) {
    OpenAiChatModel built = builder.build();
    return wrap(otel, built);
}

private static OpenAiChatModel wrap(OpenTelemetry otel, OpenAiChatModel model) {
try {
HttpClientBuilder underlyingHttpClient = getPrivateField(builder, "httpClientBuilder");
if (underlyingHttpClient == null) {
underlyingHttpClient = HttpClientBuilderLoader.loadHttpClientBuilder();
// Get the internal OpenAiClient from the chat model
Object internalClient = getPrivateField(model, "client");

// Get the HttpClient from the internal client
dev.langchain4j.http.client.HttpClient httpClient =
getPrivateField(internalClient, "httpClient");

if (httpClient instanceof WrappedHttpClient) {
log.debug("model already instrumented. skipping: {}", httpClient.getClass());
return model;
}
HttpClientBuilder wrappedHttpClient =
wrap(otel, underlyingHttpClient, new Options("openai"));
return builder.httpClientBuilder(wrappedHttpClient).build();

// Wrap the HttpClient with our instrumented version
dev.langchain4j.http.client.HttpClient wrappedHttpClient =
new WrappedHttpClient(otel, httpClient, new Options("openai"));

setPrivateField(internalClient, "httpClient", wrappedHttpClient);

return model;
} catch (Exception e) {
log.warn(
"Braintrust instrumentation could not be applied to OpenAiChatModel builder",
e);
return builder.build();
log.warn("failed to instrument OpenAiChatModel", e);
return model;
}
}

/**
 * Builds the given OpenAI streaming chat-model builder and instruments the resulting model
 * with Braintrust traces.
 */
public static OpenAiStreamingChatModel wrap(
        OpenTelemetry otel, OpenAiStreamingChatModel.OpenAiStreamingChatModelBuilder builder) {
    OpenAiStreamingChatModel built = builder.build();
    return wrap(otel, built);
}

public static OpenAiStreamingChatModel wrap(
OpenTelemetry otel, OpenAiStreamingChatModel model) {
try {
HttpClientBuilder underlyingHttpClient = getPrivateField(builder, "httpClientBuilder");
if (underlyingHttpClient == null) {
underlyingHttpClient = HttpClientBuilderLoader.loadHttpClientBuilder();
// Get the internal OpenAiClient from the streaming chat model
Object internalClient = getPrivateField(model, "client");

// Get the HttpClient from the internal client
dev.langchain4j.http.client.HttpClient httpClient =
getPrivateField(internalClient, "httpClient");

if (httpClient instanceof WrappedHttpClient) {
log.debug("model already instrumented. skipping: {}", httpClient.getClass());
return model;
}
HttpClientBuilder wrappedHttpClient =
wrap(otel, underlyingHttpClient, new Options("openai"));
return builder.httpClientBuilder(wrappedHttpClient).build();

// Wrap the HttpClient with our instrumented version
dev.langchain4j.http.client.HttpClient wrappedHttpClient =
new WrappedHttpClient(otel, httpClient, new Options("openai"));

setPrivateField(internalClient, "httpClient", wrappedHttpClient);

return model;
} catch (Exception e) {
log.warn(
"Braintrust instrumentation could not be applied to OpenAiStreamingChatModel"
+ " builder",
e);
return builder.build();
log.warn("failed to instrument OpenAiStreamingChatModel", e);
return model;
}
}

/**
 * Decorates a LangChain4j {@link HttpClientBuilder} so that clients it produces are wrapped
 * with Braintrust tracing for the provider named in {@code options}.
 */
private static HttpClientBuilder wrap(
        OpenTelemetry otel, HttpClientBuilder builder, Options options) {
    return new WrappedHttpClientBuilder(otel, builder, options);
}

public record Options(String providerName) {}

/**
 * Reads a private field by name, searching from the object's concrete class up through its
 * superclasses. (The scraped diff had the removed single-class lookup interleaved before the
 * hierarchy-walking loop, leaving a duplicate unreachable body; this is the post-change
 * method.)
 *
 * @param obj the instance to read from
 * @param fieldName the declared field name to locate
 * @return the field's value, cast to the caller's expected type (unchecked)
 * @throws NoSuchFieldException if no class in the hierarchy declares the field
 * @throws ReflectiveOperationException if the field cannot be accessed
 */
@SuppressWarnings("unchecked")
private static <T> T getPrivateField(Object obj, String fieldName)
        throws ReflectiveOperationException {
    Class<?> clazz = obj.getClass();
    while (clazz != null) {
        try {
            java.lang.reflect.Field field = clazz.getDeclaredField(fieldName);
            field.setAccessible(true);
            return (T) field.get(obj);
        } catch (NoSuchFieldException e) {
            // Not declared here; keep walking up the hierarchy.
            clazz = clazz.getSuperclass();
        }
    }
    throw new NoSuchFieldException(fieldName);
}

/**
 * Writes a private field by name, searching from the object's concrete class up through its
 * superclasses.
 *
 * @throws NoSuchFieldException if no class in the hierarchy declares the field
 * @throws ReflectiveOperationException if the field cannot be written
 */
private static void setPrivateField(Object obj, String fieldName, Object value)
        throws ReflectiveOperationException {
    for (Class<?> type = obj.getClass(); type != null; type = type.getSuperclass()) {
        try {
            java.lang.reflect.Field target = type.getDeclaredField(fieldName);
            target.setAccessible(true);
            target.set(obj, value);
            return;
        } catch (NoSuchFieldException ignored) {
            // Field not declared on this class; try the superclass on the next iteration.
        }
    }
    throw new NoSuchFieldException(fieldName);
}
}
Loading