diff --git a/README.md b/README.md index 6c64c1d9..4912feba 100644 --- a/README.md +++ b/README.md @@ -127,13 +127,19 @@ to `console` methods by using `setAutoCollectConsole(true, true)`. Note that by default `enableWebInstrumentation` will use the connection string for SDK initialization. If you want to use a different one, you can set it as `enableWebInstrumentation(true, "your-connection-string")`. -The TelemetryClient object contains a `config` property with many optional settings. These can be set as follows: +The TelemetryClient constructor accepts an optional settings object (e.g., `{ useGlobalProviders?: boolean }`, which defaults to `true`) and exposes a `config` property with many optional settings. Constructor options example: +```javascript +const client = new appInsights.TelemetryClient("your-connection-string", { useGlobalProviders: false }); +```
+Client `config` properties can be set as follows: ``` client.config.PROPERTYNAME = VALUE; ``` These properties are client specific, so you can configure `appInsights.defaultClient` separately from clients created with `new appInsights.TelemetryClient()`. +> *Important:* OpenTelemetry instrumentations rely on the global provider registry. Both `appInsights.setup().start()` and `new TelemetryClient()` default to `useGlobalProviders: true` so auto-instrumentation works out of the box. Set `{ useGlobalProviders: false }` only when you need an isolated client (e.g., per-tenant, manual-only, or test scenarios); auto-instrumentation and the auto-collect `enable*` configs (requests, dependencies, console, etc.) will not target an isolated client, so you must emit telemetry manually or attach your own processors/exporters. Multiple clients in one process share the global providers, so use the opt-out to avoid mixing their telemetry. + | Property | Description | | ------------------------------- |------------------------------------------------------------------------------------------------------------| | proxyHttpUrl | A proxy server for SDK HTTP traffic (Optional, Default pulled from `http_proxy` environment variable) | diff --git a/package-lock.json b/package-lock.json index 82fb275a..058cdedd 100644 --- a/package-lock.json +++ b/package-lock.json @@ -441,7 +441,6 @@ "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.3", @@ -1060,7 +1059,6 @@ "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", "license": "Apache-2.0", - "peer": true, "engines": { "node": ">=8.0.0" } @@ -2066,7 +2064,6 @@ "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", "dev": true, "license": "BSD-2-Clause", - "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "5.62.0", "@typescript-eslint/types": "5.62.0", @@ -2255,7 +2252,6 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -2478,7 +2474,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "caniuse-lite": "^1.0.30001737", "electron-to-chromium": "^1.5.211", @@ -2813,7 +2808,6 @@ "resolved": "https://registry.npmjs.org/diagnostic-channel/-/diagnostic-channel-1.1.1.tgz", "integrity": 
"sha512-r2HV5qFkUICyoaKlBEpLKHjxMXATUf/l+h8UZPGBHGLy4DDiY2sOLcIctax4eRnTw5wH2jTMExLntGPJ8eOJxw==", "license": "MIT", - "peer": true, "dependencies": { "semver": "^7.5.3" } @@ -2921,7 +2915,6 @@ "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -5889,7 +5882,6 @@ "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" diff --git a/package.json b/package.json index 8006c588..49edba95 100644 --- a/package.json +++ b/package.json @@ -75,9 +75,9 @@ "@opentelemetry/api-logs": "^0.208.0", "@opentelemetry/core": "^2.2.0", "@opentelemetry/exporter-logs-otlp-http": "^0.208.0", - "@opentelemetry/exporter-trace-otlp-http": "^0.208.0", - "@opentelemetry/exporter-metrics-otlp-proto": "^0.208.0", "@opentelemetry/exporter-metrics-otlp-http": "^0.208.0", + "@opentelemetry/exporter-metrics-otlp-proto": "^0.208.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.208.0", "@opentelemetry/otlp-exporter-base": "^0.208.0", "@opentelemetry/resources": "^2.2.0", "@opentelemetry/sdk-logs": "^0.208.0", diff --git a/src/shared/configuration/config.ts b/src/shared/configuration/config.ts index 082d4892..a9d7b643 100644 --- a/src/shared/configuration/config.ts +++ b/src/shared/configuration/config.ts @@ -3,10 +3,7 @@ import { AzureMonitorExporterOptions } from "@azure/monitor-opentelemetry-exporter"; import { diag } from "@opentelemetry/api"; -import { - Resource, - defaultResource, -} from "@opentelemetry/resources"; +import { Resource, defaultResource } from "@opentelemetry/resources"; import { JsonConfig } from "./jsonConfig"; import { AzureMonitorOpenTelemetryOptions, OTLPExporterConfig, InstrumentationOptions } from "../../types"; import { logLevelParser } from "../util/logLevelParser"; @@ -169,7 +166,6 @@ export class ApplicationInsightsConfig { private _getDefaultResource(): Resource { // Create a basic resource with default attributes - const resource = defaultResource(); - return resource; + return defaultResource(); } } diff --git a/src/shared/util/attributeLogRecordProcessor.ts b/src/shared/util/attributeLogRecordProcessor.ts index 8b243d02..32cc2858 100644 --- a/src/shared/util/attributeLogRecordProcessor.ts +++ b/src/shared/util/attributeLogRecordProcessor.ts @@ -1,4 +1,4 @@ -import { LogRecordProcessor, SdkLogRecord } from "@opentelemetry/sdk-logs"; +import { LogRecordProcessor, ReadableLogRecord } from "@opentelemetry/sdk-logs"; export class AttributeLogProcessor implements LogRecordProcessor { private _attributes: { [key: string]: string }; @@ -7,8 +7,9 @@ export class AttributeLogProcessor implements LogRecordProcessor { } // Override onEmit to apply log record attributes before exporting - onEmit(record: SdkLogRecord) { - record.setAttributes(this._attributes); + onEmit(record: ReadableLogRecord) { + const attributes = (record as any).attributes || ((record as any).attributes = {}); + Object.assign(attributes, this._attributes); } shutdown(): Promise { diff --git a/src/shim/applicationinsights.ts b/src/shim/applicationinsights.ts index fc439ee1..7477ad01 100644 --- a/src/shim/applicationinsights.ts +++ b/src/shim/applicationinsights.ts @@ -33,7 +33,7 @@ export let defaultClient: TelemetryClient; */ export function 
setup(setupString?: string) { if (!defaultClient) { - defaultClient = new TelemetryClient(setupString); + defaultClient = new TelemetryClient(setupString, { useGlobalProviders: true }); } else { defaultClient.pushWarningToLog("Setup has already been called once. To set up a new client, please use TelemetryClient instead.") } diff --git a/src/shim/correlationContextManager.ts b/src/shim/correlationContextManager.ts index a4cce6d1..f9ca2668 100644 --- a/src/shim/correlationContextManager.ts +++ b/src/shim/correlationContextManager.ts @@ -56,12 +56,24 @@ export class CorrelationContextManager { activeSpan = trace.getTracer(CONTEXT_NAME).startSpan(CONTEXT_NAME) as Span; } const traceStateObj: TraceState = new TraceState(activeSpan?.spanContext()?.traceState?.serialize()); + const parentSpanId = this._getParentSpanId(activeSpan); - return this.spanToContextObject(activeSpan?.spanContext(), activeSpan?.parentSpanContext?.spanId, activeSpan?.name, traceStateObj); + return this.spanToContextObject(activeSpan?.spanContext(), parentSpanId, activeSpan?.name, traceStateObj); } return null; } + private static _getParentSpanId(span: Span | null): string | undefined { + if (!span) { + return undefined; + } + const spanAny = span as any; + if (typeof spanAny.parentSpanContext === "function") { + return spanAny.parentSpanContext()?.spanId; + } + return spanAny.parentSpanId || spanAny.parentSpanContext?.spanId; + } + /** * Helper to generate objects conforming to the CorrelationContext interface * @param operationId String assigned to a series of related telemetry items - equivalent to OpenTelemetry traceId @@ -181,9 +193,10 @@ export class CorrelationContextManager { if (span) { trace.setSpanContext(context.active(), span.spanContext()); + const parentSpanId = this._getParentSpanId(span); return this.spanToContextObject( span.spanContext(), - span.parentSpanContext?.spanId, + parentSpanId, ); } diff --git a/src/shim/logsApi.ts b/src/shim/logsApi.ts index b6db38d2..3ee19876 100644 --- a/src/shim/logsApi.ts +++ b/src/shim/logsApi.ts @@ -2,7 +2,6 @@ // Licensed under the MIT license. import { Logger as OtelLogger, LogRecord } from "@opentelemetry/api-logs"; -import { SdkLogRecord as SDKLogRecord } from "@opentelemetry/sdk-logs"; import { Attributes, diag } from "@opentelemetry/api"; import { IdGenerator, RandomIdGenerator } from "@opentelemetry/sdk-trace-base"; @@ -71,7 +70,7 @@ export class LogApi { */ public trackTrace(telemetry: Contracts.TraceTelemetry): void { try { - const logRecord = this._traceToLogRecord(telemetry) as SDKLogRecord; + const logRecord = this._traceToLogRecord(telemetry); this._logger.emit(logRecord); } catch (err) { diag.error("Failed to send telemetry.", err); @@ -89,7 +88,7 @@ export class LogApi { try { const logRecord = this._exceptionToLogRecord( telemetry - ) as SDKLogRecord; + ); this._logger.emit(logRecord); } catch (err) { diag.error("Failed to send telemetry.", err); diff --git a/src/shim/telemetryClient.ts b/src/shim/telemetryClient.ts index 4eea6cf7..0f38177f 100644 --- a/src/shim/telemetryClient.ts +++ b/src/shim/telemetryClient.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
-import { Attributes, context, metrics, SpanKind, SpanOptions, SpanStatusCode, diag, trace } from "@opentelemetry/api"; +import { Attributes, Meter, Tracer, context, metrics, SpanKind, SpanOptions, SpanStatusCode, diag, trace } from "@opentelemetry/api"; import { logs } from "@opentelemetry/api-logs"; import { SEMATTRS_DB_STATEMENT, @@ -24,7 +24,8 @@ import { AttributeLogProcessor } from "../shared/util/attributeLogRecordProcesso import { LogApi } from "./logsApi"; import { flushAzureMonitor, shutdownAzureMonitor, useAzureMonitor } from "../main"; import { AzureMonitorOpenTelemetryOptions } from "../types"; -import { UNSUPPORTED_MSG, StatsbeatFeature } from "./types"; +import { TelemetryClientProvider } from "./telemetryClientProvider"; +import { TelemetryClientOptions, UNSUPPORTED_MSG, StatsbeatFeature } from "./types"; import { StatsbeatFeaturesManager } from "../shared/util/statsbeatFeaturesManager"; /** @@ -41,13 +42,17 @@ export class TelemetryClient { private _logApi: LogApi; private _isInitialized: boolean; private _options: AzureMonitorOpenTelemetryOptions; + private _telemetryClientProvider?: TelemetryClientProvider; + private _useGlobalProviders: boolean; + private _manualTracer?: Tracer; + private _manualMeter?: Meter; private _configWarnings: string[] = []; /** * Constructs a new instance of TelemetryClient * @param setupString the Connection String or Instrumentation Key to use (read from environment variable if not specified) */ - constructor(input?: string) { + constructor(input?: string, options?: TelemetryClientOptions) { TelemetryClient._instanceCount++; // Set statsbeat feature if this is the second or subsequent TelemetryClient instance @@ -60,41 +65,38 @@ export class TelemetryClient { this.commonProperties = {}; this.context = new Context(); this._isInitialized = false; + this._useGlobalProviders = options?.useGlobalProviders ?? true; } public initialize() { + if (this._isInitialized) { + return; + } this._isInitialized = true; // Parse shim config to Azure Monitor options this._options = this.config.parseConfig(); - try { - // Create attribute processors with context tags and common properties this._attributeSpanProcessor = new AttributeSpanProcessor({ ...this.context.tags, ...this.commonProperties }); this._attributeLogProcessor = new AttributeLogProcessor({ ...this.context.tags, ...this.commonProperties }); + this._options.spanProcessors = [...(this._options.spanProcessors || []), this._attributeSpanProcessor]; + this._options.logRecordProcessors = [...(this._options.logRecordProcessors || []), this._attributeLogProcessor]; - // Add processors to Azure Monitor options before initialization - if (!this._options.spanProcessors) { - this._options.spanProcessors = []; - } - this._options.spanProcessors.push(this._attributeSpanProcessor); - - if (!this._options.logRecordProcessors) { - this._options.logRecordProcessors = []; + if (this._useGlobalProviders) { + useAzureMonitor(this._options); + } else { + this._telemetryClientProvider = new TelemetryClientProvider(this._options); } - this._options.logRecordProcessors.push(this._attributeLogProcessor); - // Initialize Azure Monitor with processors included - useAzureMonitor(this._options); - - // LoggerProvider would be initialized when client is instantiated - // Get Logger from global provider - this._logApi = new LogApi(logs.getLogger("ApplicationInsightsLogger")); + const logger = this._useGlobalProviders + ? 
logs.getLogger("ApplicationInsightsLogger") + : this._telemetryClientProvider.getLogger("ApplicationInsightsLogger"); + this._logApi = new LogApi(logger); // Warn if any config warnings were generated during parsing for (let i = 0; i < this._configWarnings.length; i++) { diag.warn(this._configWarnings[i]); } - } + } catch (error) { diag.error(`Failed to initialize TelemetryClient ${error}`); } @@ -167,7 +169,7 @@ export class TelemetryClient { } // Create custom metric try { - const meter = metrics.getMeterProvider().getMeter("ApplicationInsightsMetrics"); + const meter = this._getMeterInstance(); const histogram = meter.createHistogram(telemetry.name); histogram.record(telemetry.value, { ...telemetry.properties, ...this.commonProperties, ...this.context.tags }); } catch (error) { @@ -175,6 +177,26 @@ export class TelemetryClient { } } + private _getTracerInstance(): Tracer { + if (this._telemetryClientProvider) { + if (!this._manualTracer) { + this._manualTracer = this._telemetryClientProvider.getTracer("ApplicationInsightsTracer"); + } + return this._manualTracer; + } + return trace.getTracer("ApplicationInsightsTracer"); + } + + private _getMeterInstance(): Meter { + if (this._telemetryClientProvider) { + if (!this._manualMeter) { + this._manualMeter = this._telemetryClientProvider.getMeter("ApplicationInsightsMetrics"); + } + return this._manualMeter; + } + return metrics.getMeterProvider().getMeter("ApplicationInsightsMetrics"); + } + /** * Log a request. Note that the default client will attempt to collect HTTP requests automatically so only use this for requests * that aren't automatically captured or if you've disabled automatic request collection. @@ -209,7 +231,7 @@ export class TelemetryClient { attributes: attributes, startTime: startTime, }; - const span: any = trace.getTracer("ApplicationInsightsTracer") + const span: any = this._getTracerInstance() .startSpan(telemetry.name, options, ctx); if (telemetry.id) { @@ -285,7 +307,7 @@ export class TelemetryClient { attributes: attributes, startTime: startTime, }; - const span: any = trace.getTracer("ApplicationInsightsTracer") + const span: any = this._getTracerInstance() .startSpan(telemetry.name, options, ctx); if (telemetry.id) { @@ -381,6 +403,9 @@ export class TelemetryClient { * Immediately send all queued telemetry. */ public async flush(): Promise { + if (this._telemetryClientProvider) { + return this._telemetryClientProvider.flush(); + } return flushAzureMonitor(); } @@ -388,6 +413,9 @@ export class TelemetryClient { * Shutdown client */ public async shutdown(): Promise { + if (this._telemetryClientProvider) { + return this._telemetryClientProvider.shutdown(); + } return shutdownAzureMonitor(); } diff --git a/src/shim/telemetryClientProvider.ts b/src/shim/telemetryClientProvider.ts new file mode 100644 index 00000000..7f3a2d0b --- /dev/null +++ b/src/shim/telemetryClientProvider.ts @@ -0,0 +1,171 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +import { diag } from "@opentelemetry/api"; +import { AzureMonitorLogExporter, AzureMonitorMetricExporter, AzureMonitorTraceExporter } from "@azure/monitor-opentelemetry-exporter"; +import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http"; +import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http"; +import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http"; +import { LogRecordProcessor, BatchLogRecordProcessor, LoggerProvider } from "@opentelemetry/sdk-logs"; +import { MetricReader, MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics"; +import { ParentBasedSampler, Sampler, SpanProcessor, TraceIdRatioBasedSampler, BatchSpanProcessor } from "@opentelemetry/sdk-trace-base"; +import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node"; +import { Resource, defaultResource } from "@opentelemetry/resources"; +import { AzureMonitorOpenTelemetryOptions } from "../types"; + +/** + * Provides isolated OpenTelemetry providers for a TelemetryClient instance. + */ +export class TelemetryClientProvider { + private _tracerProvider: NodeTracerProvider; + private _meterProvider: MeterProvider; + private _loggerProvider: LoggerProvider; + private _metricReaders: MetricReader[] = []; + private _spanProcessors: SpanProcessor[] = []; + private _logProcessors: LogRecordProcessor[] = []; + + constructor(private _options: AzureMonitorOpenTelemetryOptions) { + const resource = this._options.resource ?? defaultResource(); + + this._spanProcessors = this._setupTracing(); + this._logProcessors = this._setupLogging(); + this._metricReaders = this._setupMetrics(); + + this._tracerProvider = new NodeTracerProvider({ + resource, + sampler: this._createSampler(), + spanProcessors: this._spanProcessors, + }); + this._meterProvider = new MeterProvider({ + resource, + readers: this._metricReaders, + }); + this._loggerProvider = new LoggerProvider({ + resource, + processors: this._logProcessors, + }); + } + + public getTracer(name: string) { + return this._tracerProvider.getTracer(name); + } + + public getMeter(name: string) { + return this._meterProvider.getMeter(name); + } + + public getLogger(name: string) { + return this._loggerProvider.getLogger(name); + } + + public async flush() { + await Promise.all([ + this._runWithErrorHandling(this._meterProvider.forceFlush(), "Failed to flush metrics"), + this._runWithErrorHandling(this._tracerProvider.forceFlush(), "Failed to flush traces"), + this._runWithErrorHandling(this._loggerProvider.forceFlush(), "Failed to flush logs"), + ]); + } + + public async shutdown() { + await Promise.all([ + ...this._metricReaders.map((reader) => this._runWithErrorHandling(reader.shutdown(), "Failed to shutdown metric reader")), + ...this._spanProcessors.map((processor) => this._runWithErrorHandling(processor.shutdown(), "Failed to shutdown span processor")), + ...this._logProcessors.map((processor) => this._runWithErrorHandling(processor.shutdown(), "Failed to shutdown log processor")), + this._runWithErrorHandling(this._meterProvider.shutdown(), "Failed to shutdown meter provider"), + this._runWithErrorHandling(this._tracerProvider.shutdown(), "Failed to shutdown tracer provider"), + this._runWithErrorHandling(this._loggerProvider.shutdown(), "Failed to shutdown logger provider"), + ]); + } + + private _createSampler(): Sampler | undefined { + if (this._options?.samplingRatio === undefined) { + return undefined; + } + return new ParentBasedSampler({ + root: new 
TraceIdRatioBasedSampler(this._options.samplingRatio), + }); + } + + private _setupTracing(): SpanProcessor[] { + const processors: SpanProcessor[] = []; + try { + const exporter = new AzureMonitorTraceExporter(this._options.azureMonitorExporterOptions); + processors.push(new BatchSpanProcessor(exporter)); + } catch (error) { + diag.error("Failed to configure Azure Monitor trace exporter", error); + } + + if (this._options.otlpTraceExporterConfig?.enabled) { + try { + const otlpExporter = new OTLPTraceExporter(this._options.otlpTraceExporterConfig); + processors.push(new BatchSpanProcessor(otlpExporter)); + } catch (error) { + diag.error("Failed to configure OTLP trace exporter", error); + } + } + + if (this._options.spanProcessors) { + for (const processor of this._options.spanProcessors) { + processors.push(processor); + } + } + + return processors; + } + + private _setupLogging(): LogRecordProcessor[] { + const processors: LogRecordProcessor[] = []; + try { + const exporter = new AzureMonitorLogExporter(this._options.azureMonitorExporterOptions); + processors.push(new BatchLogRecordProcessor(exporter)); + } catch (error) { + diag.error("Failed to configure Azure Monitor log exporter", error); + } + + if (this._options.otlpLogExporterConfig?.enabled) { + try { + const otlpExporter = new OTLPLogExporter(this._options.otlpLogExporterConfig); + processors.push(new BatchLogRecordProcessor(otlpExporter)); + } catch (error) { + diag.error("Failed to configure OTLP log exporter", error); + } + } + + if (this._options.logRecordProcessors) { + for (const processor of this._options.logRecordProcessors) { + processors.push(processor); + } + } + + return processors; + } + + private _setupMetrics(): MetricReader[] { + const readers: MetricReader[] = []; + try { + const exporter = new AzureMonitorMetricExporter(this._options.azureMonitorExporterOptions); + readers.push(new PeriodicExportingMetricReader({ exporter })); + } catch (error) { + diag.error("Failed to configure Azure Monitor metric exporter", error); + } + + if (this._options.otlpMetricExporterConfig?.enabled) { + try { + const otlpExporter = new OTLPMetricExporter(this._options.otlpMetricExporterConfig); + readers.push(new PeriodicExportingMetricReader({ exporter: otlpExporter })); + } catch (error) { + diag.error("Failed to configure OTLP metric exporter", error); + } + } + + return readers; + } + + private async _runWithErrorHandling(promise: Promise, message: string) { + try { + await promise; + } catch (error) { + diag.error(message, error); + } + } +} diff --git a/src/shim/types.ts b/src/shim/types.ts index 231cbd4e..b92f323b 100644 --- a/src/shim/types.ts +++ b/src/shim/types.ts @@ -20,6 +20,14 @@ export enum DistributedTracingModes { AI_AND_W3C } +export interface TelemetryClientOptions { + /** + * When true, the TelemetryClient relies on the global Azure Monitor distro for telemetry pipeline. + * Defaults to true so instrumentations work out of the box; set false to isolate the client and use only manual track calls. 
+ */ + useGlobalProviders?: boolean; +} + /** * Interface which defines which specific extended metrics should be disabled * diff --git a/src/types.ts b/src/types.ts index 8df60f05..3a68884f 100644 --- a/src/types.ts +++ b/src/types.ts @@ -4,6 +4,9 @@ import { AzureMonitorOpenTelemetryOptions as DistroOptions, InstrumentationOptions as DistroInstrumentationOptions } from "@azure/monitor-opentelemetry"; import { SeverityNumber } from "@opentelemetry/api-logs"; import { InstrumentationConfig } from "@opentelemetry/instrumentation"; +import { LogRecordProcessor } from "@opentelemetry/sdk-logs"; +import { SpanProcessor } from "@opentelemetry/sdk-trace-base"; +import { MetricReader } from "@opentelemetry/sdk-metrics"; import { OTLPExporterNodeConfigBase } from "@opentelemetry/otlp-exporter-base"; @@ -20,12 +23,18 @@ export interface AzureMonitorOpenTelemetryOptions extends DistroOptions { * if true uncaught exceptions will be sent to Application Insights */ enableAutoCollectExceptions?: boolean; + /** Additional span processors to register */ + spanProcessors?: SpanProcessor[]; + /** Additional log record processors to register */ + logRecordProcessors?: LogRecordProcessor[]; /** OTLP Trace Exporter Configuration */ otlpTraceExporterConfig?: OTLPExporterConfig; /** OTLP Metric Exporter Configuration */ otlpMetricExporterConfig?: OTLPExporterConfig; /** OTLP Log Exporter Configuration */ otlpLogExporterConfig?: OTLPExporterConfig; + /** Additional metric readers to register */ + metricReaders?: MetricReader[]; /** * Sets the state of performance tracking (enabled by default) * if true performance counters will be collected every second and sent to Azure Monitor diff --git a/test/unitTests/logs/api.tests.ts b/test/unitTests/logs/api.tests.ts index ff439d48..5f9a2358 100644 --- a/test/unitTests/logs/api.tests.ts +++ b/test/unitTests/logs/api.tests.ts @@ -3,8 +3,7 @@ import assert from "assert"; import sinon from "sinon"; import nock from "nock"; -import { Logger } from "@opentelemetry/api-logs"; -import { SdkLogRecord } from "@opentelemetry/sdk-logs"; +import { Logger, LogRecord } from "@opentelemetry/api-logs"; import { AvailabilityTelemetry, @@ -40,9 +39,9 @@ describe("logs/API", () => { class TestLogger implements Logger { - public logsEmited: Array = []; + public logsEmited: Array = []; - emit(logRecord: SdkLogRecord): void { + emit(logRecord: LogRecord): void { this.logsEmited.push(logRecord); } } @@ -59,7 +58,7 @@ describe("logs/API", () => { telemetry, "TestData", data, - ) as SdkLogRecord; + ) as LogRecord; assert.equal(JSON.stringify(logRecord.body), JSON.stringify({})); assert.equal(logRecord.attributes["testAttribute"], "testValue"); assert.equal(logRecord.attributes["_MS.baseType"], "TestData"); @@ -77,7 +76,7 @@ describe("logs/API", () => { telemetry, "TestData", data, - ) as SdkLogRecord; + ) as LogRecord; assert.equal(JSON.stringify(logRecord.body), JSON.stringify({})); assert.equal(logRecord.attributes["testAttribute"], "testValue"); const errorStr: string = logRecord.attributes["error"] as string; diff --git a/test/unitTests/logs/console.tests.ts b/test/unitTests/logs/console.tests.ts index 3daa2504..2646762b 100644 --- a/test/unitTests/logs/console.tests.ts +++ b/test/unitTests/logs/console.tests.ts @@ -21,7 +21,7 @@ describe("AutoCollection/Console", () => { logs.disable(); memoryLogExporter = new InMemoryLogRecordExporter(); const loggerProvider = new LoggerProvider({ - processors: [new SimpleLogRecordProcessor(memoryLogExporter)] + processors: [new 
SimpleLogRecordProcessor(memoryLogExporter)], }); logs.setGlobalLoggerProvider(loggerProvider); }); diff --git a/test/unitTests/shim/config.tests.ts b/test/unitTests/shim/config.tests.ts index 38981931..6b1311b8 100644 --- a/test/unitTests/shim/config.tests.ts +++ b/test/unitTests/shim/config.tests.ts @@ -169,12 +169,13 @@ describe("shim/configuration/config", () => { assert.equal(process.env["APPLICATIONINSIGHTS_INSTRUMENTATION_LOGGING_LEVEL"], "WARN"); }); - it("should set context tags on logs and spans", () => { + it("should set context tags on logs and spans", async () => { const telemetryClient = new TelemetryClient(connectionString); telemetryClient.context.tags = { "ai.cloud.role": "testRole", "ai.cloud.roleInstance": "testRoleInstance" }; telemetryClient.initialize(); telemetryClient["_attributeSpanProcessor"]["_attributes"] = { "ai.cloud.role": "testRole", "ai.cloud.roleInstance": "testRoleInstance" }; telemetryClient["_attributeLogProcessor"]["_attributes"] = { "ai.cloud.role": "testRole", "ai.cloud.roleInstance": "testRoleInstance" }; + await telemetryClient.shutdown(); }); it("should disable instrumentations when noDiagnosticChannel is set", () => { diff --git a/test/unitTests/shim/correlationContextManger.tests.ts b/test/unitTests/shim/correlationContextManger.tests.ts index 6b3c1fe0..f64d5237 100644 --- a/test/unitTests/shim/correlationContextManger.tests.ts +++ b/test/unitTests/shim/correlationContextManger.tests.ts @@ -445,13 +445,14 @@ describe("CorrelationContextManager", () => { describe("with Span input", () => { it("should start a new context using Span", () => { // Setup + const parentSpan = { + traceId: "parentTraceId", + spanId: "parentSpanId", + traceFlags: 1, + }; const mockSpan = { spanContext: () => testSpanContext, - parentSpanContext: () => ({ - traceId: "parentTraceId", - spanId: "parentSpanId", - traceFlags: 1, - }), + parentSpanContext: () => parentSpan, name: "testSpan", } as unknown as Span; @@ -461,7 +462,7 @@ describe("CorrelationContextManager", () => { // Verify assert.ok(context); assert.strictEqual(context.operation.id, testSpanContext.traceId); - assert.strictEqual(context.operation.parentId, mockSpan.parentSpanContext.spanId); + assert.strictEqual(context.operation.parentId, parentSpan.spanId); }); }); diff --git a/test/unitTests/shim/telemetryClient.tests.ts b/test/unitTests/shim/telemetryClient.tests.ts index ce80ec52..33d72c81 100644 --- a/test/unitTests/shim/telemetryClient.tests.ts +++ b/test/unitTests/shim/telemetryClient.tests.ts @@ -1,17 +1,18 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for details. 
-import assert from "assert"; -import nock from "nock"; -import sinon from "sinon"; +import * as assert from "assert"; +import nock = require("nock"); +import * as sinon from "sinon"; import { Context, ProxyTracerProvider, trace, metrics, diag } from "@opentelemetry/api"; import { ReadableSpan, Span, SpanProcessor } from "@opentelemetry/sdk-trace-base"; +import { LoggerProvider, LogRecordProcessor, ReadableLogRecord } from "@opentelemetry/sdk-logs"; +import { logs } from "@opentelemetry/api-logs"; +import { SEMATTRS_RPC_SYSTEM } from "@opentelemetry/semantic-conventions"; import { DependencyTelemetry, RequestTelemetry } from "../../../src/declarations/contracts"; import { TelemetryClient } from "../../../src/shim/telemetryClient"; +import * as main from "../../../src/main"; import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node"; import { MeterProvider } from "@opentelemetry/sdk-metrics"; -import { SdkLogRecord, LogRecordProcessor, LoggerProvider } from "@opentelemetry/sdk-logs"; -import { logs } from "@opentelemetry/api-logs"; -import { SEMATTRS_RPC_SYSTEM } from "@opentelemetry/semantic-conventions"; import Config = require("../../../src/shim/shim-config"); describe("shim/TelemetryClient", () => { @@ -27,29 +28,26 @@ describe("shim/TelemetryClient", () => { before(() => { sandbox = sinon.createSandbox(); - trace.disable(); - metrics.disable(); nock("https://dc.services.visualstudio.com") .post("/v2.1/track", (body: string) => true) .reply(200, {}) .persist(); nock.disableNetConnect(); + testProcessor = new TestSpanProcessor(); + logProcessor = new TestLogProcessor({}); testProcessor = new TestSpanProcessor(); logProcessor = new TestLogProcessor({}); - + client = new TelemetryClient( "InstrumentationKey=1aa11111-bbbb-1ccc-8ddd-eeeeffff3333" ); client.config.samplingPercentage = 100; client.config.noDiagnosticChannel = true; - - // Add test processors through the Azure Monitor options client.config.azureMonitorOpenTelemetryOptions = { spanProcessors: [testProcessor], logRecordProcessors: [logProcessor] }; - client.initialize(); tracerProvider = ((trace.getTracerProvider() as ProxyTracerProvider).getDelegate() as NodeTracerProvider); loggerProvider = logs.getLoggerProvider() as LoggerProvider; @@ -67,10 +65,10 @@ describe("shim/TelemetryClient", () => { }); - after(() => { + after(async () => { nock.cleanAll(); nock.enableNetConnect(); - client.shutdown(); + await client.shutdown(); }); class TestSpanProcessor implements SpanProcessor { @@ -94,16 +92,17 @@ describe("shim/TelemetryClient", () => { constructor(attributes: { [key: string]: string }) { this._attributes = attributes; } - + // Override onEmit to apply log record attributes before exporting - onEmit(record: SdkLogRecord) { - record.setAttributes(this._attributes); + onEmit(record: ReadableLogRecord) { + const attributes = (record as any).attributes || ((record as any).attributes = {}); + Object.assign(attributes, this._attributes); } - + shutdown(): Promise { return Promise.resolve(); } - + forceFlush(): Promise { return Promise.resolve(); } @@ -115,7 +114,7 @@ describe("shim/TelemetryClient", () => { client.track({ name: "test" } as any, "Event" as any); }, /Not implemented/); }); - + it("addTelemetryProcessor should warn", () => { client.addTelemetryProcessor(() => true); assert.ok(diagWarnStub.calledOnce); @@ -137,7 +136,7 @@ describe("shim/TelemetryClient", () => { const result = client.getStatsbeat(); assert.strictEqual(result, null); }); - + it("setUseDiskRetryCaching throws error", () => { assert.throws(() => { 
client.setUseDiskRetryCaching(true); @@ -181,8 +180,6 @@ describe("shim/TelemetryClient", () => { success: false, }; client.trackDependency(telemetry); - - await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "TestName"); @@ -204,7 +201,6 @@ describe("shim/TelemetryClient", () => { success: false, }; client.trackDependency(telemetry); - await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "TestName"); @@ -224,7 +220,6 @@ describe("shim/TelemetryClient", () => { success: false, }; client.trackDependency(telemetry); - await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "TestName"); @@ -244,27 +239,27 @@ describe("shim/TelemetryClient", () => { // Call trackDependency without specifying 'time' - should default to current time as END time client.trackDependency(telemetry); const afterCall = Date.now(); - + await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "TestTimingDependency"); - + // Convert span times from hrTime to milliseconds for comparison const spanStartMs = spans[0].startTime[0] * 1000 + spans[0].startTime[1] / 1_000_000; const spanEndMs = spans[0].endTime[0] * 1000 + spans[0].endTime[1] / 1_000_000; - + // Duration should match the specified duration const actualDuration = spanEndMs - spanStartMs; assert.ok(Math.abs(actualDuration - 1000) < 10, `Expected duration ~1000ms, got ${actualDuration}ms`); - + // End time should be close to when we called trackDependency (within reasonable tolerance) - assert.ok(spanEndMs >= beforeCall && spanEndMs <= afterCall + 50, + assert.ok(spanEndMs >= beforeCall && spanEndMs <= afterCall + 50, `End time ${spanEndMs} should be between ${beforeCall} and ${afterCall + 50}`); - + // Start time should be approximately end time minus duration const expectedStartMs = spanEndMs - 1000; - assert.ok(Math.abs(spanStartMs - expectedStartMs) < 10, + assert.ok(Math.abs(spanStartMs - expectedStartMs) < 10, `Start time ${spanStartMs} should be close to ${expectedStartMs}`); }); @@ -279,29 +274,29 @@ describe("shim/TelemetryClient", () => { success: true, time: customStartTime }; - + client.trackDependency(telemetry); - + await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "CustomTimeDependency"); - + // Convert span times from hrTime to milliseconds for comparison const spanStartMs = spans[0].startTime[0] * 1000 + spans[0].startTime[1] / 1_000_000; const spanEndMs = spans[0].endTime[0] * 1000 + spans[0].endTime[1] / 1_000_000; - + // Duration should match the specified duration const actualDuration = spanEndMs - spanStartMs; assert.ok(Math.abs(actualDuration - 2000) < 10, `Expected duration ~2000ms, got ${actualDuration}ms`); - + // Start time should match the custom time provided - assert.ok(Math.abs(spanStartMs - customStartTime.getTime()) < 10, + assert.ok(Math.abs(spanStartMs - customStartTime.getTime()) < 10, `Start time ${spanStartMs} should be close to custom time ${customStartTime.getTime()}`); - + // End time should be start time plus duration const expectedEndMs = customStartTime.getTime() + 2000; - assert.ok(Math.abs(spanEndMs - expectedEndMs) < 10, + assert.ok(Math.abs(spanEndMs - expectedEndMs) < 10, `End time ${spanEndMs} should 
be close to ${expectedEndMs}`); }); @@ -315,7 +310,6 @@ describe("shim/TelemetryClient", () => { success: false, }; client.trackRequest(telemetry); - await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "TestName"); @@ -356,27 +350,27 @@ describe("shim/TelemetryClient", () => { // Call trackRequest without specifying 'time' - should default to current time as END time client.trackRequest(telemetry); const afterCall = Date.now(); - + await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "TestTimingRequest"); - + // Convert span times from hrTime to milliseconds for comparison const spanStartMs = spans[0].startTime[0] * 1000 + spans[0].startTime[1] / 1_000_000; const spanEndMs = spans[0].endTime[0] * 1000 + spans[0].endTime[1] / 1_000_000; - + // Duration should match the specified duration const actualDuration = spanEndMs - spanStartMs; assert.ok(Math.abs(actualDuration - 1500) < 10, `Expected duration ~1500ms, got ${actualDuration}ms`); - + // End time should be close to when we called trackRequest (within reasonable tolerance) - assert.ok(spanEndMs >= beforeCall && spanEndMs <= afterCall + 50, + assert.ok(spanEndMs >= beforeCall && spanEndMs <= afterCall + 50, `End time ${spanEndMs} should be between ${beforeCall} and ${afterCall + 50}`); - + // Start time should be calculated as end time - duration const expectedStartMs = spanEndMs - 1500; - assert.ok(Math.abs(spanStartMs - expectedStartMs) < 10, + assert.ok(Math.abs(spanStartMs - expectedStartMs) < 10, `Start time ${spanStartMs} should be close to ${expectedStartMs}`); }); @@ -390,26 +384,26 @@ describe("shim/TelemetryClient", () => { success: true, time: customStartTime }; - + client.trackRequest(telemetry); - + await tracerProvider.forceFlush(); const spans = testProcessor.spansProcessed; assert.equal(spans.length, 1); assert.equal(spans[0].name, "CustomTimeRequest"); - + // Convert span times from hrTime to milliseconds for comparison const spanStartMs = spans[0].startTime[0] * 1000 + spans[0].startTime[1] / 1_000_000; const spanEndMs = spans[0].endTime[0] * 1000 + spans[0].endTime[1] / 1_000_000; - + // Start time should match the provided custom time const expectedStartMs = customStartTime.getTime(); - assert.ok(Math.abs(spanStartMs - expectedStartMs) < 10, + assert.ok(Math.abs(spanStartMs - expectedStartMs) < 10, `Start time ${spanStartMs} should be close to ${expectedStartMs}`); - + // End time should be start time + duration const expectedEndMs = customStartTime.getTime() + 1200; - assert.ok(Math.abs(spanEndMs - expectedEndMs) < 10, + assert.ok(Math.abs(spanEndMs - expectedEndMs) < 10, `End time ${spanEndMs} should be close to ${expectedEndMs}`); }); @@ -438,56 +432,86 @@ describe("shim/TelemetryClient", () => { const telemetry = { name: "TestName", value: 100, + properties: { custom: "value" } + }; + client.commonProperties = { common: "prop" }; + client.context.tags = { tag: "value" } as any; + const histogramRecord = sandbox.stub(); + const meterMock = { + createHistogram: sandbox.stub().returns({ + record: histogramRecord, + }) }; - + (client as any)._manualMeter = undefined; + const getMeterStub = sandbox.stub().returns(meterMock as any); + const meterProviderStub = sandbox.stub(metrics, "getMeterProvider").returns({ getMeter: getMeterStub } as any); + + client.trackMetric(telemetry); + + assert.ok(histogramRecord.calledOnce); + 
assert.strictEqual(histogramRecord.firstCall.args[0], telemetry.value); + assert.deepStrictEqual(histogramRecord.firstCall.args[1], { + ...telemetry.properties, + ...client.commonProperties, + ...client.context.tags + }); + + meterProviderStub.restore(); + // Create spy on the histogram record method to verify metric tracking const originalMeter = metrics.getMeterProvider().getMeter("ApplicationInsightsMetrics"); const histogramRecordSpy = sandbox.spy(); - + // Mock the histogram creation to track record calls const histogramMock = { record: histogramRecordSpy }; - - const createHistogramStub = sandbox.stub(originalMeter, 'createHistogram').returns(histogramMock as any); - + + // Reset cached meter and force the provider to return the meter we are spying on + (client as any)._manualMeter = undefined; + const getMeterStub2 = sandbox.stub().returns(originalMeter as any); + const meterProviderStub2 = sandbox.stub(metrics, "getMeterProvider").returns({ getMeter: getMeterStub2 } as any); + + const createHistogramStub = sandbox.stub(originalMeter, "createHistogram").returns(histogramMock as any); + // Track the metric client.trackMetric(telemetry); - + // Verify that createHistogram was called with the correct name assert.ok(createHistogramStub.calledOnce, "createHistogram should be called once"); assert.equal(createHistogramStub.args[0][0], "TestName", "Histogram should be created with correct name"); - + // Verify that record was called with the correct value assert.ok(histogramRecordSpy.calledOnce, "Histogram record should be called once"); assert.equal(histogramRecordSpy.args[0][0], 100, "Record should be called with correct value"); - + // Verify properties were passed const recordedAttributes = histogramRecordSpy.args[0][1]; assert.ok(recordedAttributes, "Attributes should be passed to record"); + + meterProviderStub2.restore(); }); - - it("trackMetric should handle errors gracefully", async () => { + + it("trackMetric should handle errors gracefully", () => { const telemetry = { name: "ErrorMetric", value: 50, }; - - // Force an error by stubbing metrics.getMeterProvider().getMeter() + + // Force an error by stubbing the isolated meter provider const error = new Error("Failed to get meter"); - const getMeterStub = sandbox.stub(metrics.getMeterProvider(), 'getMeter').throws(error); - + (client as any)._manualMeter = undefined; + const getMeterStub = sandbox.stub().throws(error); + sandbox.stub(metrics, "getMeterProvider").returns({ getMeter: getMeterStub } as any); + // This should now throw an error internally, but the method should catch it client.trackMetric(telemetry); - + // Verify the error was logged assert.ok(diagErrorStub.calledOnce); assert.ok(diagErrorStub.calledWith(`Failed to record metric: ${error}`)); - - // Restore the stub - getMeterStub.restore(); }); - + it("trackAvailability", async () => { const stub = sandbox.stub(logProcessor, "onEmit"); const telemetry = { @@ -499,8 +523,6 @@ describe("shim/TelemetryClient", () => { message: "TestMessage" }; client.trackAvailability(telemetry); - await loggerProvider.forceFlush(); - await new Promise((resolve) => setTimeout(resolve, 800)); assert.ok(stub.calledOnce); }); @@ -512,19 +534,15 @@ describe("shim/TelemetryClient", () => { url: "http://test.com", }; client.trackPageView(telemetry); - await loggerProvider.forceFlush(); - await new Promise((resolve) => setTimeout(resolve, 800)); assert.ok(stub.calledOnce); }); - + it("trackEvent", async () => { const stub = sandbox.stub(logProcessor, "onEmit"); const telemetry = { name: 
"TestName", }; client.trackEvent(telemetry); - await loggerProvider.forceFlush(); - await new Promise((resolve) => setTimeout(resolve, 800)); assert.ok(stub.calledOnce); }); @@ -534,8 +552,6 @@ describe("shim/TelemetryClient", () => { message: "test message", }; client.trackTrace(telemetry); - await loggerProvider.forceFlush(); - await new Promise((resolve) => setTimeout(resolve, 800)); assert.ok(stub.calledOnce); }); @@ -545,12 +561,35 @@ describe("shim/TelemetryClient", () => { exception: new Error("test error"), }; client.trackException(telemetry); - await loggerProvider.forceFlush(); - await new Promise((resolve) => setTimeout(resolve, 800)); assert.ok(stub.calledOnce); }); }); + describe("initialization modes", () => { + it("does not call useAzureMonitor for isolated clients", async () => { + const useAzureMonitorStub = sandbox.stub(main, "useAzureMonitor"); + const isolatedClient = new TelemetryClient( + "InstrumentationKey=11111111-bbbb-1ccc-8ddd-eeeeffff3334", + { useGlobalProviders: false } + ); + isolatedClient.initialize(); + assert.ok(useAzureMonitorStub.notCalled); + await isolatedClient.shutdown(); + }); + + it("uses global telemetry pipeline when requested (default)", async () => { + const useAzureMonitorStub = sandbox.stub(main, "useAzureMonitor"); + const shutdownStub = sandbox.stub(main, "shutdownAzureMonitor").resolves(); + const globalClient = new TelemetryClient( + "InstrumentationKey=11111111-bbbb-1ccc-8ddd-eeeeffff3335" + ); + globalClient.initialize(); + assert.ok(useAzureMonitorStub.calledOnce); + await globalClient.shutdown(); + assert.ok(shutdownStub.calledOnce); + }); + }); + describe("Instance count tracking and MULTI_IKEY statsbeat feature", () => { let originalEnv: NodeJS.ProcessEnv; @@ -570,7 +609,7 @@ describe("shim/TelemetryClient", () => { it("should not enable MULTI_IKEY feature when creating first TelemetryClient instance", () => { const firstClient = new TelemetryClient("InstrumentationKey=1aa11111-bbbb-1ccc-8ddd-eeeeffff3333"); - + // Check statsbeat features environment variable const statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; if (statsbeatFeatures) { @@ -578,28 +617,28 @@ describe("shim/TelemetryClient", () => { // MULTI_IKEY bit should not be set (128) assert.strictEqual((config.feature & 128), 0, "MULTI_IKEY feature should not be enabled for first instance"); } - + firstClient.shutdown(); }); it("should enable MULTI_IKEY feature when creating second TelemetryClient instance", () => { const firstClient = new TelemetryClient("InstrumentationKey=1aa11111-bbbb-1ccc-8ddd-eeeeffff3333"); - + // First instance should not have MULTI_IKEY feature enabled let statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; if (statsbeatFeatures) { const config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 0, "MULTI_IKEY feature should not be enabled for first instance"); } - + const secondClient = new TelemetryClient("InstrumentationKey=2bb22222-cccc-2ddd-9eee-fffff4444444"); - + // Second instance should have MULTI_IKEY feature enabled statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; assert.ok(statsbeatFeatures, "AZURE_MONITOR_STATSBEAT_FEATURES should be set"); const config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 128, "MULTI_IKEY feature should be enabled for second instance"); - + firstClient.shutdown(); secondClient.shutdown(); }); @@ -607,19 +646,19 @@ describe("shim/TelemetryClient", () => { it("should keep MULTI_IKEY feature enabled 
when creating additional TelemetryClient instances", () => { const firstClient = new TelemetryClient("InstrumentationKey=1aa11111-bbbb-1ccc-8ddd-eeeeffff3333"); const secondClient = new TelemetryClient("InstrumentationKey=2bb22222-cccc-2ddd-9eee-fffff4444444"); - + let statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; assert.ok(statsbeatFeatures, "AZURE_MONITOR_STATSBEAT_FEATURES should be set after second instance"); let config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 128, "MULTI_IKEY feature should be enabled after second instance"); - + const thirdClient = new TelemetryClient("InstrumentationKey=3cc33333-dddd-3eee-afff-ggggg5555555"); - + statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; assert.ok(statsbeatFeatures, "AZURE_MONITOR_STATSBEAT_FEATURES should remain set for third instance"); config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 128, "MULTI_IKEY feature should remain enabled for third instance"); - + firstClient.shutdown(); secondClient.shutdown(); thirdClient.shutdown(); @@ -628,13 +667,13 @@ describe("shim/TelemetryClient", () => { it("should increment instance count correctly for multiple TelemetryClient instances", () => { const firstClient = new TelemetryClient("InstrumentationKey=1aa11111-bbbb-1ccc-8ddd-eeeeffff3333"); assert.strictEqual((TelemetryClient as any)._instanceCount, 1, "Instance count should be 1 after first client"); - + const secondClient = new TelemetryClient("InstrumentationKey=2bb22222-cccc-2ddd-9eee-fffff4444444"); assert.strictEqual((TelemetryClient as any)._instanceCount, 2, "Instance count should be 2 after second client"); - + const thirdClient = new TelemetryClient("InstrumentationKey=3cc33333-dddd-3eee-afff-ggggg5555555"); assert.strictEqual((TelemetryClient as any)._instanceCount, 3, "Instance count should be 3 after third client"); - + firstClient.shutdown(); secondClient.shutdown(); thirdClient.shutdown(); @@ -642,20 +681,20 @@ describe("shim/TelemetryClient", () => { it("should work with different connection strings", () => { const firstClient = new TelemetryClient("InstrumentationKey=1aa11111-bbbb-1ccc-8ddd-eeeeffff3333;IngestionEndpoint=https://eastus-8.in.applicationinsights.azure.com/"); - + let statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; if (statsbeatFeatures) { const config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 0, "MULTI_IKEY feature should not be enabled for first instance with connection string"); } - + const secondClient = new TelemetryClient("InstrumentationKey=2bb22222-cccc-2ddd-9eee-fffff4444444;IngestionEndpoint=https://westus-2.in.applicationinsights.azure.com/"); - + statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; assert.ok(statsbeatFeatures, "AZURE_MONITOR_STATSBEAT_FEATURES should be set"); const config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 128, "MULTI_IKEY feature should be enabled for second instance with different connection string"); - + firstClient.shutdown(); secondClient.shutdown(); }); @@ -663,21 +702,21 @@ describe("shim/TelemetryClient", () => { it("should work when no connection string is provided", () => { const firstClient = new TelemetryClient(); assert.strictEqual((TelemetryClient as any)._instanceCount, 1, "Instance count should be 1 after first client with no connection string"); - + let statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; if (statsbeatFeatures) { const 
config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 0, "MULTI_IKEY feature should not be enabled for first instance with no connection string"); } - + const secondClient = new TelemetryClient(); assert.strictEqual((TelemetryClient as any)._instanceCount, 2, "Instance count should be 2 after second client with no connection string"); - + statsbeatFeatures = process.env["AZURE_MONITOR_STATSBEAT_FEATURES"]; assert.ok(statsbeatFeatures, "AZURE_MONITOR_STATSBEAT_FEATURES should be set"); const config = JSON.parse(statsbeatFeatures); assert.strictEqual((config.feature & 128), 128, "MULTI_IKEY feature should be enabled for second instance with no connection string"); - + firstClient.shutdown(); secondClient.shutdown(); }); diff --git a/test/unitTests/shim/telemetryClientProvider.tests.ts b/test/unitTests/shim/telemetryClientProvider.tests.ts new file mode 100644 index 00000000..9f31d67c --- /dev/null +++ b/test/unitTests/shim/telemetryClientProvider.tests.ts @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for details. +import * as assert from "assert"; +import * as sinon from "sinon"; +import { Context } from "@opentelemetry/api"; +import { LogRecord } from "@opentelemetry/api-logs"; +import { AzureMonitorLogExporter, AzureMonitorMetricExporter, AzureMonitorTraceExporter } from "@azure/monitor-opentelemetry-exporter"; +import { BatchLogRecordProcessor, LoggerProvider, LogRecordProcessor } from "@opentelemetry/sdk-logs"; +import { MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics"; +import { BatchSpanProcessor, ReadableSpan, Span, SpanProcessor } from "@opentelemetry/sdk-trace-base"; +import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node"; +import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http"; +import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http"; +import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http"; +import { TelemetryClientProvider } from "../../../src/shim/telemetryClientProvider"; +import { AzureMonitorOpenTelemetryOptions } from "../../../src/types"; + +describe("shim/TelemetryClientProvider", () => { + const CONNECTION_STRING = "InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://example.com/"; + let sandbox: sinon.SinonSandbox; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + function createOptions(overrides: Partial = {}): AzureMonitorOpenTelemetryOptions { + return { + azureMonitorExporterOptions: { + connectionString: CONNECTION_STRING, + }, + ...overrides, + } as AzureMonitorOpenTelemetryOptions; + } + + it("registers Azure Monitor processors by default", () => { + const provider = new TelemetryClientProvider(createOptions()); + + const registeredSpanProcessors = (provider as any)._spanProcessors as SpanProcessor[]; + const registeredLogProcessors = (provider as any)._logProcessors as LogRecordProcessor[]; + const registeredMetricReaders = (provider as any)._metricReaders as PeriodicExportingMetricReader[]; + + assert.ok( + registeredSpanProcessors.some((processor) => + processor instanceof BatchSpanProcessor && (processor as any)._exporter instanceof AzureMonitorTraceExporter + ) + ); + assert.ok( + registeredLogProcessors.some((processor) => + processor instanceof BatchLogRecordProcessor && (processor as any)._exporter instanceof 
AzureMonitorLogExporter + ) + ); + assert.ok( + registeredMetricReaders.some((reader) => + reader instanceof PeriodicExportingMetricReader && (reader as any)._exporter instanceof AzureMonitorMetricExporter + ) + ); + }); + + it("registers OTLP exporters when enabled", () => { + const provider = new TelemetryClientProvider( + createOptions({ + otlpTraceExporterConfig: { enabled: true, url: "http://localhost/v1/traces" }, + otlpLogExporterConfig: { enabled: true, url: "http://localhost/v1/logs" }, + otlpMetricExporterConfig: { enabled: true, url: "http://localhost/v1/metrics" }, + }) + ); + + const registeredSpanExporters = ((provider as any)._spanProcessors as SpanProcessor[]).map((processor) => (processor as any)._exporter); + const registeredLogExporters = ((provider as any)._logProcessors as LogRecordProcessor[]).map((processor) => (processor as any)._exporter); + const registeredMetricExporters = ((provider as any)._metricReaders as PeriodicExportingMetricReader[]).map((reader) => (reader as any)._exporter); + + assert.ok(registeredSpanExporters.some((exporter) => exporter instanceof AzureMonitorTraceExporter)); + assert.ok(registeredSpanExporters.some((exporter) => exporter instanceof OTLPTraceExporter)); + assert.ok(registeredLogExporters.some((exporter) => exporter instanceof AzureMonitorLogExporter)); + assert.ok(registeredLogExporters.some((exporter) => exporter instanceof OTLPLogExporter)); + assert.ok(registeredMetricExporters.some((exporter) => exporter instanceof AzureMonitorMetricExporter)); + assert.ok(registeredMetricExporters.some((exporter) => exporter instanceof OTLPMetricExporter)); + }); + + it("flushes underlying providers", async () => { + const tracerFlushStub = sandbox.stub(NodeTracerProvider.prototype, "forceFlush").resolves(); + const meterFlushStub = sandbox.stub(MeterProvider.prototype, "forceFlush").resolves(); + const loggerFlushStub = sandbox.stub(LoggerProvider.prototype, "forceFlush").resolves(); + + const provider = new TelemetryClientProvider(createOptions()); + await provider.flush(); + + assert.ok(tracerFlushStub.calledOnce); + assert.ok(meterFlushStub.calledOnce); + assert.ok(loggerFlushStub.calledOnce); + }); + + it("shuts down providers and processors", async () => { + const tracerShutdownStub = sandbox.stub(NodeTracerProvider.prototype, "shutdown").resolves(); + const meterShutdownStub = sandbox.stub(MeterProvider.prototype, "shutdown").resolves(); + const loggerShutdownStub = sandbox.stub(LoggerProvider.prototype, "shutdown").resolves(); + const metricReaderShutdownStub = sandbox.stub(PeriodicExportingMetricReader.prototype, "shutdown").resolves(); + + const spanProcessor = new TestSpanProcessor(sandbox); + const logProcessor = new TestLogProcessor(sandbox); + + const provider = new TelemetryClientProvider( + createOptions({ + spanProcessors: [spanProcessor], + logRecordProcessors: [logProcessor], + }) + ); + + await provider.shutdown(); + + assert.ok(tracerShutdownStub.calledOnce); + assert.ok(meterShutdownStub.calledOnce); + assert.ok(loggerShutdownStub.calledOnce); + assert.ok(metricReaderShutdownStub.called); + assert.ok(spanProcessor.shutdownStub.calledOnce); + assert.ok(logProcessor.shutdownStub.calledOnce); + }); + + class TestSpanProcessor implements SpanProcessor { + public shutdownStub: sinon.SinonStub<[], Promise>; + public forceFlushStub: sinon.SinonStub<[], Promise>; + + constructor(s: sinon.SinonSandbox) { + this.shutdownStub = s.stub<[], Promise>().resolves(); + this.forceFlushStub = s.stub<[], Promise>().resolves(); + } + + 
onStart(_span: Span, _parentContext: Context): void { + return; + } + + onEnd(_span: ReadableSpan): void { + return; + } + + shutdown(): Promise { + return this.shutdownStub(); + } + + forceFlush(): Promise { + return this.forceFlushStub(); + } + } + + class TestLogProcessor implements LogRecordProcessor { + public onEmitStub: sinon.SinonStub<[LogRecord], void>; + public shutdownStub: sinon.SinonStub<[], Promise>; + public forceFlushStub: sinon.SinonStub<[], Promise>; + + constructor(s: sinon.SinonSandbox) { + this.onEmitStub = s.stub<[LogRecord], void>(); + this.shutdownStub = s.stub<[], Promise>().resolves(); + this.forceFlushStub = s.stub<[], Promise>().resolves(); + } + + onEmit(record: LogRecord): void { + this.onEmitStub(record); + } + + shutdown(): Promise { + return this.shutdownStub(); + } + + forceFlush(): Promise { + return this.forceFlushStub(); + } + } +});
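For context, a minimal usage sketch of the `useGlobalProviders` option introduced in this diff, using only the APIs visible above (`setup`/`start`, the `TelemetryClient` constructor, `initialize`, `track*`, `flush`, `shutdown`); the connection strings are placeholders and the import assumes the published `applicationinsights` package name:

```typescript
// Minimal sketch, assuming the shim exports shown in this diff.
// Connection strings below are placeholders, not real keys.
import * as appInsights from "applicationinsights";

async function demo(): Promise<void> {
    // Default path: global providers, so OpenTelemetry auto-instrumentation targets this client.
    appInsights.setup("InstrumentationKey=00000000-0000-0000-0000-000000000000").start();
    appInsights.defaultClient.trackEvent({ name: "startup" });

    // Opt-out path: isolated client with its own tracer/meter/logger providers;
    // only manual track* calls flow through it.
    const isolatedClient = new appInsights.TelemetryClient(
        "InstrumentationKey=11111111-1111-1111-1111-111111111111",
        { useGlobalProviders: false }
    );
    isolatedClient.initialize();
    isolatedClient.trackTrace({ message: "manual-only telemetry" });
    await isolatedClient.flush();
    await isolatedClient.shutdown();
}

demo().catch((err) => console.error(err));
```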