Skip to content

Commit 37a8fd1

Browse files
committed
clean up
1 parent ca7ac93 commit 37a8fd1

4 files changed

Lines changed: 9 additions & 25 deletions

File tree

dd-java-agent/instrumentation/openai-java/openai-java-3.0/src/main/java/datadog/trace/instrumentation/openai_java/CompletionServiceInstrumentation.java

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
import com.openai.core.http.StreamResponse;
1313
import com.openai.models.completions.Completion;
1414
import com.openai.models.completions.CompletionCreateParams;
15-
import datadog.context.ContextScope;
1615
import datadog.trace.agent.tooling.Instrumenter;
1716
import datadog.trace.bootstrap.instrumentation.api.AgentScope;
1817
import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
@@ -46,20 +45,17 @@ public static class CreateAdvice {
4645
@Advice.OnMethodEnter(suppress = Throwable.class)
4746
public static AgentScope enter(
4847
@Advice.Argument(0) final CompletionCreateParams params,
49-
@Advice.FieldValue("clientOptions") ClientOptions clientOptions,
50-
@Advice.Local("llmScope") ContextScope llmScope) {
48+
@Advice.FieldValue("clientOptions") ClientOptions clientOptions) {
5149
AgentSpan span = DECORATE.startSpan(clientOptions);
52-
// llmScope = LLMObsContext.attach(span.context());
53-
// TODO why would we ever need to activate llmScope in this instrumentation if we never expect
54-
// inner llmobs spans
50+
// NOTE: skip LLMObsContext.attach(span.context()) here because we never expect an inner
51+
// LLMObsSpan
5552
CompletionDecorator.DECORATE.withCompletionCreateParams(span, params);
5653
return activateSpan(span);
5754
}
5855

5956
@Advice.OnMethodExit(onThrowable = Throwable.class, suppress = Throwable.class)
6057
public static void exit(
6158
@Advice.Enter final AgentScope scope,
62-
@Advice.Local("llmScope") ContextScope llmScope,
6359
@Advice.Return(readOnly = false) HttpResponseFor<Completion> response,
6460
@Advice.Thrown final Throwable err) {
6561
AgentSpan span = scope.span();
@@ -70,7 +66,6 @@ public static void exit(
7066
HttpResponseWrapper.wrap(response, span, CompletionDecorator.DECORATE::withCompletion);
7167
}
7268
scope.close();
73-
// llmScope.close();
7469
}
7570
}
7671

dd-java-agent/instrumentation/openai-java/openai-java-3.0/src/main/java/datadog/trace/instrumentation/openai_java/ResponseDecorator.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ public void withResponseCreateParams(AgentSpan span, ResponseCreateParams params
6464
inputMessages.add(LLMObs.LLMMessage.from("system", instructions));
6565
});
6666

67-
Optional<String> textOpt = params._input().asString(); // TODO cover with unit tests
67+
Optional<String> textOpt = params._input().asString();
6868
if (textOpt.isPresent()) {
6969
inputMessages.add(LLMObs.LLMMessage.from("user", textOpt.get()));
7070
}
@@ -75,7 +75,7 @@ public void withResponseCreateParams(AgentSpan span, ResponseCreateParams params
7575
if (input.isText()) {
7676
inputMessages.add(LLMObs.LLMMessage.from("user", input.asText()));
7777
} else if (input.isResponse()) {
78-
List<ResponseInputItem> inputItems = input.asResponse(); // TODO cover with unit tests
78+
List<ResponseInputItem> inputItems = input.asResponse();
7979
for (ResponseInputItem item : inputItems) {
8080
LLMObs.LLMMessage message = extractInputItemMessage(item);
8181
if (message != null) {

dd-java-agent/instrumentation/openai-java/openai-java-3.0/src/test/groovy/ChatCompletionServiceTest.groovy

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -256,7 +256,8 @@ class ChatCompletionServiceTest extends OpenAiTest {
256256
assertChatCompletionTrace(true, outputTag)
257257
and:
258258
outputTag.size() == 3
259-
outputTag.each { msg ->
259+
outputTag.each {
260+
msg ->
260261
assert msg.role == "assistant"
261262
assert msg.content == "Hello, world!"
262263
}

dd-java-agent/instrumentation/openai-java/openai-java-3.0/src/test/groovy/ResponseServiceTest.groovy

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -161,9 +161,8 @@ class ResponseServiceTest extends OpenAiTest {
161161
}
162162
163163
def "create streaming response with tool input test"() {
164-
// Tests the strongly-typed path: ResponseInputItem objects → params._input().asKnown()
165164
runnableUnderTrace("parent") {
166-
StreamResponse<ResponseStreamEvent> streamResponse = openAiClient.responses().createStreaming(responseCreateParamsWithToolInput(true)) // TODO
165+
StreamResponse<ResponseStreamEvent> streamResponse = openAiClient.responses().createStreaming(responseCreateParamsWithToolInput(true))
167166
try (Stream stream = streamResponse.stream()) {
168167
stream.forEach {
169168
// consume the stream
@@ -175,17 +174,6 @@ class ResponseServiceTest extends OpenAiTest {
175174
assertResponseTrace(true, "gpt-4.1", "gpt-4.1-2025-04-14", null)
176175
}
177176
178-
// NOTE: responseCreateParamsWithToolInput(true) creates raw JSON via JsonValue.from()
179-
// This exercises the asUnknown() → asArray() parsing path in ResponseDecorator
180-
// However, it cannot be unit tested here because:
181-
// 1. Raw JSON serializes differently than typed objects
182-
// 2. No matching HTTP recording exists in the mock backend
183-
// 3. The test would fail with InternalServerException
184-
//
185-
// This path IS tested by the shared integration test:
186-
// llm-obs/test/test_openai.py::test_responses_create_tool_input
187-
// which represents the real cross-language use case (Python → Java test server)
188-
189177
private void assertResponseTrace(boolean isStreaming, String reqModel, String respModel, Map reasoning) {
190178
assertTraces(1) {
191179
trace(3) {
@@ -207,7 +195,7 @@ class ResponseServiceTest extends OpenAiTest {
207195
"_ml_obs_tag.model_name" String
208196
"_ml_obs_tag.metadata" Map
209197
"_ml_obs_tag.input" List
210-
"_ml_obs_tag.output" List // TODO capture to validate tool calls
198+
"_ml_obs_tag.output" List
211199
"_ml_obs_metric.input_tokens" Long
212200
"_ml_obs_metric.output_tokens" Long
213201
"_ml_obs_metric.total_tokens" Long

0 commit comments

Comments
 (0)