diff --git a/src/client/testing/common/debugLauncher.ts b/src/client/testing/common/debugLauncher.ts index c28535b30644..ec319d9915eb 100644 --- a/src/client/testing/common/debugLauncher.ts +++ b/src/client/testing/common/debugLauncher.ts @@ -1,6 +1,6 @@ import { inject, injectable, named } from 'inversify'; import * as path from 'path'; -import { DebugConfiguration, l10n, Uri, WorkspaceFolder, DebugSession, DebugSessionOptions } from 'vscode'; +import { DebugConfiguration, l10n, Uri, WorkspaceFolder, DebugSession, DebugSessionOptions, Disposable } from 'vscode'; import { IApplicationShell, IDebugService } from '../../common/application/types'; import { EXTENSION_ROOT_DIR } from '../../common/constants'; import * as internalScripts from '../../common/process/internal/scripts'; @@ -17,6 +17,14 @@ import { getWorkspaceFolder, getWorkspaceFolders } from '../../common/vscodeApis import { showErrorMessage } from '../../common/vscodeApis/windowApis'; import { createDeferred } from '../../common/utils/async'; import { addPathToPythonpath } from './helpers'; +import * as envExtApi from '../../envExt/api.internal'; + +/** + * Key used to mark debug configurations with a unique session identifier. + * This allows us to track which debug session belongs to which launchDebugger() call + * when multiple debug sessions are launched in parallel. + */ +const TEST_SESSION_MARKER_KEY = '__vscodeTestSessionMarker'; @injectable() export class DebugLauncher implements ITestDebugLauncher { @@ -31,6 +39,10 @@ export class DebugLauncher implements ITestDebugLauncher { this.configService = this.serviceContainer.get(IConfigurationService); } + /** + * Launches a debug session for test execution. + * Handles cancellation, multi-session support via unique markers, and cleanup. 
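+     *
+     * @param options Launch options for the test run; `options.token` is observed so the caller
+     *   is notified promptly if the run is cancelled.
+     * @param callback Invoked at most once, when the session terminates, the run is cancelled,
+     *   or the session fails to start.
+     * @param sessionOptions Options forwarded to `debugManager.startDebugging`.
+     * @returns A promise that resolves once the debug session has ended (or immediately when the
+     *   token is already cancelled).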
+ */ public async launchDebugger( options: LaunchOptions, callback?: () => void, @@ -38,18 +50,35 @@ export class DebugLauncher implements ITestDebugLauncher { ): Promise { const deferred = createDeferred(); let hasCallbackBeenCalled = false; + + // Collect disposables for cleanup when debugging completes + const disposables: Disposable[] = []; + + // Ensure callback is only invoked once, even if multiple termination paths fire + const callCallbackOnce = () => { + if (!hasCallbackBeenCalled) { + hasCallbackBeenCalled = true; + callback?.(); + } + }; + + // Early exit if already cancelled before we start if (options.token && options.token.isCancellationRequested) { - hasCallbackBeenCalled = true; - return undefined; + callCallbackOnce(); deferred.resolve(); - callback?.(); + return deferred.promise; } - options.token?.onCancellationRequested(() => { - deferred.resolve(); - callback?.(); - hasCallbackBeenCalled = true; - }); + // Listen for cancellation from the test run (e.g., user clicks stop in Test Explorer) + // This allows the caller to clean up resources even if the debug session is still running + if (options.token) { + disposables.push( + options.token.onCancellationRequested(() => { + deferred.resolve(); + callCallbackOnce(); + }), + ); + } const workspaceFolder = DebugLauncher.resolveWorkspaceFolder(options.cwd); const launchArgs = await this.getLaunchArgs( @@ -59,23 +88,49 @@ export class DebugLauncher implements ITestDebugLauncher { ); const debugManager = this.serviceContainer.get(IDebugService); - let activatedDebugSession: DebugSession | undefined; - debugManager.startDebugging(workspaceFolder, launchArgs, sessionOptions).then(() => { - // Save the debug session after it is started so we can check if it is the one that was terminated. - activatedDebugSession = debugManager.activeDebugSession; - }); - debugManager.onDidTerminateDebugSession((session) => { - traceVerbose(`Debug session terminated. sessionId: ${session.id}`); - // Only resolve no callback has been made and the session is the one that was started. - if ( - !hasCallbackBeenCalled && - activatedDebugSession !== undefined && - session.id === activatedDebugSession?.id - ) { - deferred.resolve(); - callback?.(); - } + // Unique marker to identify this session among concurrent debug sessions + const sessionMarker = `test-${Date.now()}-${Math.random().toString(36).substring(7)}`; + launchArgs[TEST_SESSION_MARKER_KEY] = sessionMarker; + + let ourSession: DebugSession | undefined; + + // Capture our specific debug session when it starts by matching the marker. + // This fires for ALL debug sessions, so we filter to only our marker. + disposables.push( + debugManager.onDidStartDebugSession((session) => { + if (session.configuration[TEST_SESSION_MARKER_KEY] === sessionMarker) { + ourSession = session; + traceVerbose(`[test-debug] Debug session started: ${session.name} (${session.id})`); + } + }), + ); + + // Handle debug session termination (user stops debugging, or tests complete). + // Only react to OUR session terminating - other parallel sessions should + // continue running independently. 
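+        // (Matching on the injected marker replaces the previous reliance on
+        // debugManager.activeDebugSession, which cannot distinguish between several
+        // test debug sessions running at the same time.)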
+ disposables.push( + debugManager.onDidTerminateDebugSession((session) => { + if (ourSession && session.id === ourSession.id) { + traceVerbose(`[test-debug] Debug session terminated: ${session.name} (${session.id})`); + deferred.resolve(); + callCallbackOnce(); + } + }), + ); + + // Start the debug session + const started = await debugManager.startDebugging(workspaceFolder, launchArgs, sessionOptions); + if (!started) { + traceError('Failed to start debug session'); + deferred.resolve(); + callCallbackOnce(); + } + + // Clean up event subscriptions when debugging completes (success, failure, or cancellation) + deferred.promise.finally(() => { + disposables.forEach((d) => d.dispose()); }); + return deferred.promise; } @@ -108,6 +163,12 @@ export class DebugLauncher implements ITestDebugLauncher { subProcess: true, }; } + + // Use project name in debug session name if provided + if (options.project) { + debugConfig.name = `Debug Tests: ${options.project.name}`; + } + if (!debugConfig.rules) { debugConfig.rules = []; } @@ -257,6 +318,23 @@ export class DebugLauncher implements ITestDebugLauncher { // run via F5 style debugging. launchArgs.purpose = []; + // For project-based execution, get the Python path from the project's environment. + // Fallback: if env API unavailable or fails, LaunchConfigurationResolver already set + // launchArgs.python from the active interpreter, so debugging still works. + if (options.project && envExtApi.useEnvExtension()) { + try { + const pythonEnv = await envExtApi.getEnvironment(options.project.uri); + if (pythonEnv?.execInfo?.run?.executable) { + launchArgs.python = pythonEnv.execInfo.run.executable; + traceVerbose( + `[test-by-project] Debug session using Python path from project: ${launchArgs.python}`, + ); + } + } catch (error) { + traceVerbose(`[test-by-project] Could not get environment for project, using default: ${error}`); + } + } + return launchArgs; } diff --git a/src/client/testing/common/types.ts b/src/client/testing/common/types.ts index 562005386633..e2fa2d6d2e5a 100644 --- a/src/client/testing/common/types.ts +++ b/src/client/testing/common/types.ts @@ -2,6 +2,7 @@ import { CancellationToken, DebugSessionOptions, OutputChannel, Uri } from 'vsco import { Product } from '../../common/types'; import { TestSettingsPropertyNames } from '../configuration/types'; import { TestProvider } from '../types'; +import { PythonProject } from '../../envExt/types'; export type UnitTestProduct = Product.pytest | Product.unittest; @@ -26,6 +27,8 @@ export type LaunchOptions = { pytestPort?: string; pytestUUID?: string; runTestIdsPort?: string; + /** Optional Python project for project-based execution. */ + project?: PythonProject; }; export enum TestFilter { diff --git a/src/client/testing/testController/common/projectTestExecution.ts b/src/client/testing/testController/common/projectTestExecution.ts new file mode 100644 index 000000000000..970c216f6d50 --- /dev/null +++ b/src/client/testing/testController/common/projectTestExecution.ts @@ -0,0 +1,309 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
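+//
+// Project-based ("test-by-project") execution: the requested TestItems are grouped by the
+// Python project that owns them, and each group is run through that project's own execution
+// adapter and environment (currently only when pytest is the enabled test provider).
+// executeTestsForProjects() is the entry point; the test controller calls it when a workspace
+// has projects registered in the TestProjectRegistry. Roughly:
+//
+//   const groups = await groupTestItemsByProject(items, projects);
+//   for (const { project, items: projectItems } of groups.values()) { /* run per project */ }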
+ +import { CancellationToken, FileCoverageDetail, TestItem, TestRun, TestRunProfileKind, TestRunRequest } from 'vscode'; +import { traceError, traceInfo, traceVerbose, traceWarn } from '../../../logging'; +import { sendTelemetryEvent } from '../../../telemetry'; +import { EventName } from '../../../telemetry/constants'; +import { IPythonExecutionFactory } from '../../../common/process/types'; +import { ITestDebugLauncher } from '../../common/types'; +import { ProjectAdapter } from './projectAdapter'; +import { TestProjectRegistry } from './testProjectRegistry'; +import { getProjectId } from './projectUtils'; +import { getEnvExtApi, useEnvExtension } from '../../../envExt/api.internal'; + +/** + * Dependencies required for project-based test execution. + * Passed to execution functions to avoid tight coupling to the controller. + */ +export interface ProjectExecutionDependencies { + projectRegistry: TestProjectRegistry; + pythonExecFactory: IPythonExecutionFactory; + debugLauncher: ITestDebugLauncher; +} + +/** + * Executes tests for multiple projects within a workspace (project-based mode). + * Groups test items by their owning project and executes each project's tests + * using that project's Python environment. + * + * Cancellation is handled at multiple levels: + * 1. Before starting each project's execution (checked here) + * 2. Within each execution adapter (via runInstance.token) + */ +export async function executeTestsForProjects( + projects: ProjectAdapter[], + testItems: TestItem[], + runInstance: TestRun, + request: TestRunRequest, + token: CancellationToken, + deps: ProjectExecutionDependencies, +): Promise { + if (projects.length === 0) { + traceError(`[test-by-project] No projects provided for execution`); + return; + } + + // Early exit if already cancelled + if (token.isCancellationRequested) { + traceInfo(`[test-by-project] Execution cancelled before starting`); + return; + } + + // Group test items by project + const testsByProject = await groupTestItemsByProject(testItems, projects); + + const isDebugMode = request.profile?.kind === TestRunProfileKind.Debug; + traceInfo(`[test-by-project] Executing tests across ${testsByProject.size} project(s), debug=${isDebugMode}`); + + // Execute tests for each project in parallel + // For debug mode, multiple debug sessions will be launched in parallel + // Each execution respects cancellation via runInstance.token + const executions = Array.from(testsByProject.entries()).map(async ([_projectId, { project, items }]) => { + // Check for cancellation before starting each project + if (token.isCancellationRequested) { + traceInfo(`[test-by-project] Skipping ${project.projectName} - cancellation requested`); + return; + } + + if (items.length === 0) return; + + traceInfo(`[test-by-project] Executing ${items.length} test item(s) for project: ${project.projectName}`); + + sendTelemetryEvent(EventName.UNITTEST_RUN, undefined, { + tool: project.testProvider, + debugging: isDebugMode, + }); + + // Setup coverage for this project if needed + if (request.profile?.kind === TestRunProfileKind.Coverage) { + setupCoverageForProject(request, project); + } + + try { + await executeTestsForProject(project, items, runInstance, request, deps); + } catch (error) { + // Don't log cancellation as an error + if (!token.isCancellationRequested) { + traceError(`[test-by-project] Execution failed for project ${project.projectName}:`, error); + } + } + }); + + await Promise.all(executions); + + if (token.isCancellationRequested) { + 
traceInfo(`[test-by-project] Project executions cancelled`); + } else { + traceInfo(`[test-by-project] All project executions completed`); + } +} + +/** + * Lookup context for avoiding redundant API calls within a single test run. + */ +interface ProjectLookupContext { + /** Maps file URI fsPath → resolved ProjectAdapter (or undefined if no match) */ + uriToAdapter: Map; + /** Maps project URI fsPath → ProjectAdapter for O(1) adapter lookup */ + projectPathToAdapter: Map; +} + +/** + * Groups test items by their owning project. Uses env API when available, else falls back to path matching. + * Time complexity: O(n + p) amortized via per-run caching. + */ +export async function groupTestItemsByProject( + testItems: TestItem[], + projects: ProjectAdapter[], +): Promise> { + const result = new Map(); + + // Initialize entries for all projects + for (const project of projects) { + result.set(getProjectId(project.projectUri), { project, items: [] }); + } + + // Build lookup context for this run - O(p) one-time setup, enables O(1) lookups per item. + // When tests are from a single project, most lookups hit the cache after the first item. + const lookupContext: ProjectLookupContext = { + uriToAdapter: new Map(), + projectPathToAdapter: new Map(projects.map((p) => [p.projectUri.fsPath, p])), + }; + + // Assign each test item to its project + for (const item of testItems) { + const project = await findProjectForTestItem(item, projects, lookupContext); + if (project) { + const entry = result.get(getProjectId(project.projectUri)); + if (entry) { + entry.items.push(item); + } + } else { + // If no project matches, log it + traceWarn(`[test-by-project] Could not match test item ${item.id} to a project`); + } + } + + // Remove projects with no test items + for (const [projectId, entry] of result.entries()) { + if (entry.items.length === 0) { + result.delete(projectId); + } + } + + return result; +} + +/** + * Finds the project that owns a test item. Uses env API when available, else path-based matching. + */ +export async function findProjectForTestItem( + item: TestItem, + projects: ProjectAdapter[], + lookupContext?: ProjectLookupContext, +): Promise { + if (!item.uri) return undefined; + + const uriPath = item.uri.fsPath; + + // Check lookup context first - O(1) + if (lookupContext?.uriToAdapter.has(uriPath)) { + return lookupContext.uriToAdapter.get(uriPath); + } + + let result: ProjectAdapter | undefined; + + // Try using the Python Environment extension API first. + // Legacy path: when useEnvExtension() is false, this block is skipped and we go + // directly to findProjectByPath() below (path-based matching). + if (useEnvExtension()) { + try { + const envExtApi = await getEnvExtApi(); + const pythonProject = envExtApi.getPythonProject(item.uri); + if (pythonProject) { + // Use lookup context for O(1) adapter lookup instead of O(p) linear search + result = lookupContext?.projectPathToAdapter.get(pythonProject.uri.fsPath); + if (!result) { + // Fallback to linear search if lookup context not available + result = projects.find((p) => p.projectUri.fsPath === pythonProject.uri.fsPath); + } + } + } catch (error) { + traceVerbose(`[test-by-project] Failed to use env extension API, falling back to path matching: ${error}`); + } + } + + // Fallback: path-based matching when env API unavailable or didn't find a match. + // O(p) time complexity where p = number of projects. 
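+    // Example: an item at /ws/parent/child/tests/test_x.py resolves to the "child" project
+    // rather than "parent", because findProjectByPath prefers the longest matching project path.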
+ if (!result) { + result = findProjectByPath(item, projects); + } + + // Store result for future lookups of same file within this run - O(1) + if (lookupContext) { + lookupContext.uriToAdapter.set(uriPath, result); + } + + return result; +} + +/** + * Fallback: finds project using path-based matching. O(p) time complexity. + */ +function findProjectByPath(item: TestItem, projects: ProjectAdapter[]): ProjectAdapter | undefined { + if (!item.uri) return undefined; + + const itemPath = item.uri.fsPath; + let bestMatch: ProjectAdapter | undefined; + let bestMatchLength = 0; + + for (const project of projects) { + const projectPath = project.projectUri.fsPath; + // Check if the item's path starts with the project's path + if (itemPath.startsWith(projectPath) && projectPath.length > bestMatchLength) { + bestMatch = project; + bestMatchLength = projectPath.length; + } + } + + return bestMatch; +} + +/** + * Executes tests for a single project using the project's Python environment. + */ +export async function executeTestsForProject( + project: ProjectAdapter, + testItems: TestItem[], + runInstance: TestRun, + request: TestRunRequest, + deps: ProjectExecutionDependencies, +): Promise { + const testCaseIds: string[] = []; + + // Mark items as started and collect test IDs + for (const item of testItems) { + // Recursively get test case nodes if this is a parent node + const testCaseNodes = getTestCaseNodesRecursive(item); + for (const node of testCaseNodes) { + runInstance.started(node); + const runId = project.resultResolver.vsIdToRunId.get(node.id); + if (runId) { + testCaseIds.push(runId); + } + } + } + + if (testCaseIds.length === 0) { + traceVerbose(`[test-by-project] No test IDs found for project ${project.projectName}`); + return; + } + + traceInfo(`[test-by-project] Running ${testCaseIds.length} test(s) for project: ${project.projectName}`); + + // Execute tests using the project's execution adapter + await project.executionAdapter.runTests( + project.projectUri, + testCaseIds, + request.profile?.kind, + runInstance, + deps.pythonExecFactory, + deps.debugLauncher, + undefined, // interpreter not needed, project has its own environment + project, + ); +} + +/** + * Recursively gets all test case nodes from a test item tree. + */ +export function getTestCaseNodesRecursive(item: TestItem): TestItem[] { + const results: TestItem[] = []; + if (item.children.size === 0) { + // This is a leaf node (test case) + results.push(item); + } else { + // Recursively get children + item.children.forEach((child) => { + results.push(...getTestCaseNodesRecursive(child)); + }); + } + return results; +} + +/** + * Sets up detailed coverage loading for a project. + */ +export function setupCoverageForProject(request: TestRunRequest, project: ProjectAdapter): void { + if (request.profile?.kind === TestRunProfileKind.Coverage) { + request.profile.loadDetailedCoverage = ( + _testRun: TestRun, + fileCoverage, + _token, + ): Thenable => { + const details = project.resultResolver.detailedCoverageMap.get(fileCoverage.uri.fsPath); + return Promise.resolve(details ?? 
[]); + }; + } +} diff --git a/src/client/testing/testController/controller.ts b/src/client/testing/testController/controller.ts index 036658e2af9e..ea04ecbd1438 100644 --- a/src/client/testing/testController/controller.ts +++ b/src/client/testing/testController/controller.ts @@ -45,6 +45,7 @@ import { IEnvironmentVariablesProvider } from '../../common/variables/types'; import { ProjectAdapter } from './common/projectAdapter'; import { TestProjectRegistry } from './common/testProjectRegistry'; import { createTestAdapters, getProjectId } from './common/projectUtils'; +import { executeTestsForProjects } from './common/projectTestExecution'; import { useEnvExtension, getEnvExtApi } from '../../envExt/api.internal'; import { DidChangePythonProjectsEventArgs, PythonProject } from '../../envExt/types'; @@ -611,11 +612,15 @@ export class PythonTestController implements ITestController, IExtensionSingleAc await Promise.all( workspaces.map(async (workspace) => { - if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) { - this.commandManager - .executeCommand(constants.Commands.TriggerEnvironmentSelection, workspace.uri) - .then(noop, noop); - return; + // In project-based mode, each project has its own environment, + // so we don't require a global active interpreter + if (!useEnvExtension()) { + if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) { + this.commandManager + .executeCommand(constants.Commands.TriggerEnvironmentSelection, workspace.uri) + .then(noop, noop); + return; + } } await this.discoverTestsInWorkspace(workspace.uri); }), @@ -698,9 +703,13 @@ export class PythonTestController implements ITestController, IExtensionSingleAc const workspaces: readonly WorkspaceFolder[] = this.workspaceService.workspaceFolders || []; await Promise.all( workspaces.map(async (workspace) => { - if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) { - traceError('Cannot trigger test discovery as a valid interpreter is not selected'); - return; + // In project-based mode, each project has its own environment, + // so we don't require a global active interpreter + if (!useEnvExtension()) { + if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) { + traceError('Cannot trigger test discovery as a valid interpreter is not selected'); + return; + } } await this.refreshTestDataInternal(workspace.uri); }), @@ -784,6 +793,17 @@ export class PythonTestController implements ITestController, IExtensionSingleAc return; } + // Check if we're in project-based mode and should use project-specific execution + if (this.projectRegistry.hasProjects(workspace.uri) && settings.testing.pytestEnabled) { + const projects = this.projectRegistry.getProjectsArray(workspace.uri); + await executeTestsForProjects(projects, testItems, runInstance, request, token, { + projectRegistry: this.projectRegistry, + pythonExecFactory: this.pythonExecFactory, + debugLauncher: this.debugLauncher, + }); + return; + } + const testAdapter = this.testAdapters.get(workspace.uri) || (this.testAdapters.values().next().value as WorkspaceTestAdapter); diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 6c950ec7e01b..34e4c5933c72 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -116,6 +116,16 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { 
if (profileKind && profileKind === TestRunProfileKind.Coverage) { mutableEnv.COVERAGE_ENABLED = 'True'; } + + // Set PROJECT_ROOT_PATH for project-based testing + // This tells the Python side where to root the test tree for multi-project workspaces + if (project) { + mutableEnv.PROJECT_ROOT_PATH = project.projectUri.fsPath; + traceInfo( + `[test-by-project] Setting PROJECT_ROOT_PATH=${project.projectUri.fsPath} for ${project.projectName}`, + ); + } + const debugBool = profileKind && profileKind === TestRunProfileKind.Debug; // Create the Python environment in which to execute the command. @@ -166,6 +176,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { testProvider: PYTEST_PROVIDER, runTestIdsPort: testIdsFileName, pytestPort: resultNamedPipeName, + // Pass project for project-based debugging (Python path and session name derived from this) + project: project?.pythonProject, }; const sessionOptions: DebugSessionOptions = { testRun: runInstance, @@ -179,7 +191,9 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { sessionOptions, ); } else if (useEnvExtension()) { - const pythonEnv = await getEnvironment(uri); + // For project-based execution, use the project's Python environment + // Otherwise, fall back to getting the environment from the URI + const pythonEnv = project?.pythonEnvironment ?? (await getEnvironment(uri)); if (pythonEnv) { const deferredTillExecClose: Deferred = utils.createTestingDeferred(); diff --git a/src/client/testing/testController/unittest/testExecutionAdapter.ts b/src/client/testing/testController/unittest/testExecutionAdapter.ts index 85f09cd0b1f5..967f9529ea2f 100644 --- a/src/client/testing/testController/unittest/testExecutionAdapter.ts +++ b/src/client/testing/testController/unittest/testExecutionAdapter.ts @@ -27,6 +27,7 @@ import { ITestDebugLauncher, LaunchOptions } from '../../common/types'; import { UNITTEST_PROVIDER } from '../../common/constants'; import * as utils from '../common/utils'; import { getEnvironment, runInBackground, useEnvExtension } from '../../../envExt/api.internal'; +import { PythonEnvironment } from '../../../pythonEnvironments/info'; import { ProjectAdapter } from '../common/projectAdapter'; /** @@ -47,9 +48,18 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter { runInstance: TestRun, executionFactory: IPythonExecutionFactory, debugLauncher?: ITestDebugLauncher, - _interpreter?: unknown, // Not used - kept for interface compatibility + interpreter?: PythonEnvironment, project?: ProjectAdapter, ): Promise { + // Note: project parameter is currently unused for unittest. + // Project-based unittest execution will be implemented in a future PR. 
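+        // The parameters are still accepted so this adapter keeps the same runTests signature
+        // as PytestTestExecutionAdapter (ITestExecutionAdapter).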
+ console.log( + 'interpreter, project parameters are currently unused in UnittestTestExecutionAdapter, they will be used in a future implementation of project-based unittest execution.:', + { + interpreter, + project, + }, + ); // deferredTillServerClose awaits named pipe server close const deferredTillServerClose: Deferred = utils.createTestingDeferred(); diff --git a/src/test/testing/common/debugLauncher.unit.test.ts b/src/test/testing/common/debugLauncher.unit.test.ts index 397ae03eafc2..86e862103bf6 100644 --- a/src/test/testing/common/debugLauncher.unit.test.ts +++ b/src/test/testing/common/debugLauncher.unit.test.ts @@ -30,6 +30,7 @@ import { TestProvider } from '../../../client/testing/types'; import { isOs, OSType } from '../../common'; import { IEnvironmentActivationService } from '../../../client/interpreter/activation/types'; import { createDeferred } from '../../../client/common/utils/async'; +import * as envExtApi from '../../../client/envExt/api.internal'; use(chaiAsPromised.default); @@ -106,7 +107,7 @@ suite('Unit Tests - Debug Launcher', () => { ); } function setupDebugManager( - workspaceFolder: WorkspaceFolder, + _workspaceFolder: WorkspaceFolder, expected: DebugConfiguration, testProvider: TestProvider, ) { @@ -123,35 +124,48 @@ suite('Unit Tests - Debug Launcher', () => { .returns(() => Promise.resolve(expected.env)); const deferred = createDeferred(); + let capturedConfig: DebugConfiguration | undefined; + // Use TypeMoq.It.isAny() because the implementation adds a session marker to the config debugService - .setup((d) => - d.startDebugging(TypeMoq.It.isValue(workspaceFolder), TypeMoq.It.isValue(expected), undefined), - ) - .returns((_wspc: WorkspaceFolder, _expectedParam: DebugConfiguration) => { + .setup((d) => d.startDebugging(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())) + .callback((_wspc: WorkspaceFolder, config: DebugConfiguration) => { + capturedConfig = config; deferred.resolve(); - return Promise.resolve(undefined as any); - }); - - // create a fake debug session that the debug service will return on terminate - const fakeDebugSession = TypeMoq.Mock.ofType(); - fakeDebugSession.setup((ds) => ds.id).returns(() => 'id-val'); - const debugSessionInstance = fakeDebugSession.object; + }) + .returns(() => Promise.resolve(true)); + // Setup onDidStartDebugSession - the new implementation uses this to capture the session debugService - .setup((d) => d.activeDebugSession) - .returns(() => debugSessionInstance) - .verifiable(TypeMoq.Times.once()); + .setup((d) => d.onDidStartDebugSession(TypeMoq.It.isAny())) + .returns((callback) => { + deferred.promise.then(() => { + if (capturedConfig) { + callback(({ + id: 'test-session-id', + configuration: capturedConfig, + } as unknown) as DebugSession); + } + }); + return { dispose: () => {} }; + }); + // Setup onDidTerminateDebugSession - fires after the session starts debugService .setup((d) => d.onDidTerminateDebugSession(TypeMoq.It.isAny())) .returns((callback) => { deferred.promise.then(() => { - callback(debugSessionInstance); + setTimeout(() => { + if (capturedConfig) { + callback(({ + id: 'test-session-id', + configuration: capturedConfig, + } as unknown) as DebugSession); + } + }, 10); }); - return undefined as any; - }) - .verifiable(TypeMoq.Times.once()); + return { dispose: () => {} }; + }); } function createWorkspaceFolder(folderPath: string): WorkspaceFolder { return { @@ -692,4 +706,228 @@ suite('Unit Tests - Debug Launcher', () => { expect(configs).to.be.deep.equal([]); }); + + // ===== 
PROJECT-BASED DEBUG SESSION TESTS ===== + + suite('Project-based debug sessions', () => { + function setupForProjectTests(options: LaunchOptions) { + interpreterService + .setup((i) => i.getActiveInterpreter(TypeMoq.It.isAny())) + .returns(() => Promise.resolve(({ path: 'python' } as unknown) as PythonEnvironment)); + settings.setup((p) => p.envFile).returns(() => __filename); + + debugEnvHelper + .setup((x) => x.getEnvironmentVariables(TypeMoq.It.isAny(), TypeMoq.It.isAny())) + .returns(() => Promise.resolve({})); + + const workspaceFolders = [{ index: 0, name: 'test', uri: Uri.file(options.cwd) }]; + getWorkspaceFoldersStub.returns(workspaceFolders); + getWorkspaceFolderStub.returns(workspaceFolders[0]); + pathExistsStub.resolves(false); + + // Stub useEnvExtension to avoid null reference errors in tests + sinon.stub(envExtApi, 'useEnvExtension').returns(false); + } + + /** + * Helper to setup debug service mocks with proper session lifecycle simulation. + * The implementation uses onDidStartDebugSession to capture the session via marker, + * then onDidTerminateDebugSession to resolve when that session ends. + */ + function setupDebugServiceWithSessionLifecycle(): { + capturedConfigs: DebugConfiguration[]; + } { + const capturedConfigs: DebugConfiguration[] = []; + let startCallback: ((session: DebugSession) => void) | undefined; + let terminateCallback: ((session: DebugSession) => void) | undefined; + + debugService + .setup((d) => d.startDebugging(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())) + .callback((_, config) => { + capturedConfigs.push(config); + // Simulate the full session lifecycle after startDebugging resolves + setTimeout(() => { + const session = ({ + id: `session-${capturedConfigs.length}`, + configuration: config, + } as unknown) as DebugSession; + // Fire start first (so ourSession is captured) + startCallback?.(session); + // Then fire terminate (so the promise resolves) + setTimeout(() => terminateCallback?.(session), 5); + }, 5); + }) + .returns(() => Promise.resolve(true)); + + debugService + .setup((d) => d.onDidStartDebugSession(TypeMoq.It.isAny())) + .callback((cb) => { + startCallback = cb; + }) + .returns(() => ({ dispose: () => {} })); + + debugService + .setup((d) => d.onDidTerminateDebugSession(TypeMoq.It.isAny())) + .callback((cb) => { + terminateCallback = cb; + }) + .returns(() => ({ dispose: () => {} })); + + return { capturedConfigs }; + } + + test('should use project name in config name when provided', async () => { + const options: LaunchOptions = { + cwd: 'one/two/three', + args: ['/one/two/three/testfile.py'], + testProvider: 'pytest', + runTestIdsPort: 'runTestIdsPort', + pytestPort: 'pytestPort', + project: { name: 'myproject (Python 3.11)', uri: Uri.file('one/two/three') }, + }; + + setupForProjectTests(options); + const { capturedConfigs } = setupDebugServiceWithSessionLifecycle(); + + await debugLauncher.launchDebugger(options); + + expect(capturedConfigs).to.have.length(1); + expect(capturedConfigs[0].name).to.equal('Debug Tests: myproject (Python 3.11)'); + }); + + test('should use default python when no project provided', async () => { + const options: LaunchOptions = { + cwd: 'one/two/three', + args: ['/one/two/three/testfile.py'], + testProvider: 'pytest', + runTestIdsPort: 'runTestIdsPort', + pytestPort: 'pytestPort', + }; + + setupForProjectTests(options); + const { capturedConfigs } = setupDebugServiceWithSessionLifecycle(); + + await debugLauncher.launchDebugger(options); + + expect(capturedConfigs).to.have.length(1); + 
// Should use the default 'python' from interpreterService mock + expect(capturedConfigs[0].python).to.equal('python'); + }); + + test('should add unique session marker to launch config', async () => { + const options: LaunchOptions = { + cwd: 'one/two/three', + args: ['/one/two/three/testfile.py'], + testProvider: 'pytest', + runTestIdsPort: 'runTestIdsPort', + pytestPort: 'pytestPort', + }; + + setupForProjectTests(options); + const { capturedConfigs } = setupDebugServiceWithSessionLifecycle(); + + await debugLauncher.launchDebugger(options); + + expect(capturedConfigs).to.have.length(1); + // Should have a session marker of format 'test-{timestamp}-{random}' + const marker = (capturedConfigs[0] as any).__vscodeTestSessionMarker; + expect(marker).to.be.a('string'); + expect(marker).to.match(/^test-\d+-[a-z0-9]+$/); + }); + + test('should generate unique markers for each launch', async () => { + const options: LaunchOptions = { + cwd: 'one/two/three', + args: ['/one/two/three/testfile.py'], + testProvider: 'pytest', + runTestIdsPort: 'runTestIdsPort', + pytestPort: 'pytestPort', + }; + + setupForProjectTests(options); + const { capturedConfigs } = setupDebugServiceWithSessionLifecycle(); + + // Launch twice + await debugLauncher.launchDebugger(options); + await debugLauncher.launchDebugger(options); + + expect(capturedConfigs).to.have.length(2); + const marker1 = (capturedConfigs[0] as any).__vscodeTestSessionMarker; + const marker2 = (capturedConfigs[1] as any).__vscodeTestSessionMarker; + expect(marker1).to.not.equal(marker2); + }); + + test('should only resolve when matching session terminates', async () => { + const options: LaunchOptions = { + cwd: 'one/two/three', + args: ['/one/two/three/testfile.py'], + testProvider: 'pytest', + runTestIdsPort: 'runTestIdsPort', + pytestPort: 'pytestPort', + }; + + setupForProjectTests(options); + + let capturedConfig: DebugConfiguration | undefined; + let terminateCallback: ((session: DebugSession) => void) | undefined; + let startCallback: ((session: DebugSession) => void) | undefined; + + debugService + .setup((d) => d.startDebugging(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())) + .callback((_, config) => { + capturedConfig = config; + }) + .returns(() => Promise.resolve(true)); + + debugService + .setup((d) => d.onDidStartDebugSession(TypeMoq.It.isAny())) + .callback((cb) => { + startCallback = cb; + }) + .returns(() => ({ dispose: () => {} })); + + debugService + .setup((d) => d.onDidTerminateDebugSession(TypeMoq.It.isAny())) + .callback((cb) => { + terminateCallback = cb; + }) + .returns(() => ({ dispose: () => {} })); + + const launchPromise = debugLauncher.launchDebugger(options); + + // Wait for config to be captured + await new Promise((r) => setTimeout(r, 10)); + + // Simulate our session starting + const ourSession = ({ + id: 'our-session-id', + configuration: capturedConfig!, + } as unknown) as DebugSession; + startCallback?.(ourSession); + + // Create a different session (like another project's debug) + const otherSession = ({ + id: 'other-session-id', + configuration: { __vscodeTestSessionMarker: 'different-marker' }, + } as unknown) as DebugSession; + + // Terminate the OTHER session first - should NOT resolve our promise + terminateCallback?.(otherSession); + + // Wait a bit to ensure it didn't resolve + let resolved = false; + const checkPromise = launchPromise.then(() => { + resolved = true; + }); + + await new Promise((r) => setTimeout(r, 20)); + expect(resolved).to.be.false; + + // Now terminate OUR session - 
should resolve + terminateCallback?.(ourSession); + + await checkPromise; + expect(resolved).to.be.true; + }); + }); }); diff --git a/src/test/testing/testController/common/projectTestExecution.unit.test.ts b/src/test/testing/testController/common/projectTestExecution.unit.test.ts new file mode 100644 index 000000000000..cbd0a972cdd4 --- /dev/null +++ b/src/test/testing/testController/common/projectTestExecution.unit.test.ts @@ -0,0 +1,704 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { expect } from 'chai'; +import * as sinon from 'sinon'; +import * as typemoq from 'typemoq'; +import { + CancellationToken, + CancellationTokenSource, + TestRun, + TestRunProfile, + TestRunProfileKind, + TestRunRequest, + Uri, +} from 'vscode'; +import { + createMockDependencies, + createMockProjectAdapter, + createMockTestItem, + createMockTestItemWithoutUri, + createMockTestRun, +} from '../testMocks'; +import { + executeTestsForProject, + executeTestsForProjects, + findProjectForTestItem, + getTestCaseNodesRecursive, + groupTestItemsByProject, + setupCoverageForProject, +} from '../../../../client/testing/testController/common/projectTestExecution'; +import * as telemetry from '../../../../client/telemetry'; +import * as envExtApi from '../../../../client/envExt/api.internal'; + +suite('Project Test Execution', () => { + let sandbox: sinon.SinonSandbox; + let useEnvExtensionStub: sinon.SinonStub; + + setup(() => { + sandbox = sinon.createSandbox(); + // Default to disabled env extension for path-based fallback tests + useEnvExtensionStub = sandbox.stub(envExtApi, 'useEnvExtension').returns(false); + }); + + teardown(() => { + sandbox.restore(); + }); + + // ===== findProjectForTestItem Tests ===== + + suite('findProjectForTestItem', () => { + test('should return undefined when test item has no URI', async () => { + // Mock + const item = createMockTestItemWithoutUri('test1'); + const projects = [createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' })]; + + // Run + const result = await findProjectForTestItem(item, projects); + + // Assert + expect(result).to.be.undefined; + }); + + test('should return matching project when item path is within project directory', async () => { + // Mock + const item = createMockTestItem('test1', '/workspace/proj/tests/test_file.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await findProjectForTestItem(item, [project]); + + // Assert + expect(result).to.equal(project); + }); + + test('should return undefined when item path is outside all project directories', async () => { + // Mock + const item = createMockTestItem('test1', '/other/path/test.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await findProjectForTestItem(item, [project]); + + // Assert + expect(result).to.be.undefined; + }); + + test('should return most specific (deepest) project when nested projects exist', async () => { + // Mock - parent and child project with overlapping paths + const item = createMockTestItem('test1', '/workspace/parent/child/tests/test.py'); + const parentProject = createMockProjectAdapter({ projectPath: '/workspace/parent', projectName: 'parent' }); + const childProject = createMockProjectAdapter({ + projectPath: '/workspace/parent/child', + projectName: 'child', + }); + + // Run + const result = await 
findProjectForTestItem(item, [parentProject, childProject]); + + // Assert - should match child (longer path) not parent + expect(result).to.equal(childProject); + }); + + test('should return most specific project regardless of input order', async () => { + // Mock - same as above but different order + const item = createMockTestItem('test1', '/workspace/parent/child/tests/test.py'); + const parentProject = createMockProjectAdapter({ projectPath: '/workspace/parent', projectName: 'parent' }); + const childProject = createMockProjectAdapter({ + projectPath: '/workspace/parent/child', + projectName: 'child', + }); + + // Run - pass child first, then parent + const result = await findProjectForTestItem(item, [childProject, parentProject]); + + // Assert - order shouldn't affect result + expect(result).to.equal(childProject); + }); + + test('should match item at project root level', async () => { + // Mock + const item = createMockTestItem('test1', '/workspace/proj/test.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await findProjectForTestItem(item, [project]); + + // Assert + expect(result).to.equal(project); + }); + + test('should use env extension API when available', async () => { + // Enable env extension + useEnvExtensionStub.returns(true); + + // Mock the env extension API + const item = createMockTestItem('test1', '/workspace/proj/tests/test_file.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + const mockEnvApi = { + getPythonProject: sandbox.stub().returns({ uri: project.projectUri }), + }; + sandbox.stub(envExtApi, 'getEnvExtApi').resolves(mockEnvApi as any); + + // Run + const result = await findProjectForTestItem(item, [project]); + + // Assert + expect(result).to.equal(project); + expect(mockEnvApi.getPythonProject.calledOnceWith(item.uri)).to.be.true; + }); + + test('should fall back to path matching when env extension API is unavailable', async () => { + // Env extension enabled but throws + useEnvExtensionStub.returns(true); + sandbox.stub(envExtApi, 'getEnvExtApi').rejects(new Error('API unavailable')); + + // Mock + const item = createMockTestItem('test1', '/workspace/proj/tests/test_file.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await findProjectForTestItem(item, [project]); + + // Assert - should still work via fallback + expect(result).to.equal(project); + }); + }); + + // ===== groupTestItemsByProject Tests ===== + + suite('groupTestItemsByProject', () => { + test('should group single test item to its matching project', async () => { + // Mock + const item = createMockTestItem('test1', '/workspace/proj/test.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await groupTestItemsByProject([item], [project]); + + // Assert + expect(result.size).to.equal(1); + const entry = Array.from(result.values())[0]; + expect(entry.project).to.equal(project); + expect(entry.items).to.deep.equal([item]); + }); + + test('should aggregate multiple items belonging to same project', async () => { + // Mock + const item1 = createMockTestItem('test1', '/workspace/proj/tests/test1.py'); + const item2 = createMockTestItem('test2', '/workspace/proj/tests/test2.py'); + const item3 = createMockTestItem('test3', '/workspace/proj/test3.py'); + const project = 
createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await groupTestItemsByProject([item1, item2, item3], [project]); + + // Assert - use Set for order-agnostic comparison + expect(result.size).to.equal(1); + const entry = Array.from(result.values())[0]; + expect(entry.items).to.have.length(3); + expect(new Set(entry.items)).to.deep.equal(new Set([item1, item2, item3])); + }); + + test('should separate items into groups by their owning project', async () => { + // Mock + const item1 = createMockTestItem('test1', '/workspace/proj1/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj2/test.py'); + const item3 = createMockTestItem('test3', '/workspace/proj1/other_test.py'); + const proj1 = createMockProjectAdapter({ projectPath: '/workspace/proj1', projectName: 'proj1' }); + const proj2 = createMockProjectAdapter({ projectPath: '/workspace/proj2', projectName: 'proj2' }); + + // Run + const result = await groupTestItemsByProject([item1, item2, item3], [proj1, proj2]); + + // Assert - use Set for order-agnostic comparison + expect(result.size).to.equal(2); + const proj1Entry = result.get(proj1.projectUri.toString()); + const proj2Entry = result.get(proj2.projectUri.toString()); + expect(proj1Entry?.items).to.have.length(2); + expect(new Set(proj1Entry?.items)).to.deep.equal(new Set([item1, item3])); + expect(proj2Entry?.items).to.deep.equal([item2]); + }); + + test('should return empty map when no test items provided', async () => { + // Mock + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await groupTestItemsByProject([], [project]); + + // Assert + expect(result.size).to.equal(0); + }); + + test('should exclude items that do not match any project path', async () => { + // Mock + const item = createMockTestItem('test1', '/other/path/test.py'); + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + + // Run + const result = await groupTestItemsByProject([item], [project]); + + // Assert + expect(result.size).to.equal(0); + }); + + test('should assign item to most specific (deepest) project for nested paths', async () => { + // Mock + const item = createMockTestItem('test1', '/workspace/parent/child/test.py'); + const parentProject = createMockProjectAdapter({ projectPath: '/workspace/parent', projectName: 'parent' }); + const childProject = createMockProjectAdapter({ + projectPath: '/workspace/parent/child', + projectName: 'child', + }); + + // Run + const result = await groupTestItemsByProject([item], [parentProject, childProject]); + + // Assert + expect(result.size).to.equal(1); + const entry = result.get(childProject.projectUri.toString()); + expect(entry?.project).to.equal(childProject); + expect(entry?.items).to.deep.equal([item]); + }); + + test('should omit projects that have no matching test items', async () => { + // Mock + const item = createMockTestItem('test1', '/workspace/proj1/test.py'); + const proj1 = createMockProjectAdapter({ projectPath: '/workspace/proj1', projectName: 'proj1' }); + const proj2 = createMockProjectAdapter({ projectPath: '/workspace/proj2', projectName: 'proj2' }); + + // Run + const result = await groupTestItemsByProject([item], [proj1, proj2]); + + // Assert + expect(result.size).to.equal(1); + expect(result.has(proj1.projectUri.toString())).to.be.true; + expect(result.has(proj2.projectUri.toString())).to.be.false; + }); + }); + + // ===== 
getTestCaseNodesRecursive Tests ===== + + suite('getTestCaseNodesRecursive', () => { + test('should return single item when it is a leaf node with no children', () => { + // Mock + const item = createMockTestItem('test_func', '/test.py'); + + // Run + const result = getTestCaseNodesRecursive(item); + + // Assert + expect(result).to.deep.equal([item]); + }); + + test('should return all leaf nodes from single-level nested structure', () => { + // Mock + const leaf1 = createMockTestItem('test_method1', '/test.py'); + const leaf2 = createMockTestItem('test_method2', '/test.py'); + const classItem = createMockTestItem('TestClass', '/test.py', [leaf1, leaf2]); + + // Run + const result = getTestCaseNodesRecursive(classItem); + + // Assert - use Set for order-agnostic comparison + expect(result).to.have.length(2); + expect(new Set(result)).to.deep.equal(new Set([leaf1, leaf2])); + }); + + test('should traverse deeply nested structure to find all leaf nodes', () => { + // Mock - 3 levels deep: file → class → inner class → test + const leaf1 = createMockTestItem('test1', '/test.py'); + const leaf2 = createMockTestItem('test2', '/test.py'); + const innerClass = createMockTestItem('InnerClass', '/test.py', [leaf2]); + const outerClass = createMockTestItem('OuterClass', '/test.py', [leaf1, innerClass]); + const fileItem = createMockTestItem('test_file.py', '/test.py', [outerClass]); + + // Run + const result = getTestCaseNodesRecursive(fileItem); + + // Assert - use Set for order-agnostic comparison + expect(result).to.have.length(2); + expect(new Set(result)).to.deep.equal(new Set([leaf1, leaf2])); + }); + + test('should collect leaves from multiple sibling branches', () => { + // Mock - multiple test classes at same level + const leaf1 = createMockTestItem('test1', '/test.py'); + const leaf2 = createMockTestItem('test2', '/test.py'); + const leaf3 = createMockTestItem('test3', '/test.py'); + const class1 = createMockTestItem('Class1', '/test.py', [leaf1]); + const class2 = createMockTestItem('Class2', '/test.py', [leaf2, leaf3]); + const fileItem = createMockTestItem('test_file.py', '/test.py', [class1, class2]); + + // Run + const result = getTestCaseNodesRecursive(fileItem); + + // Assert - use Set for order-agnostic comparison + expect(result).to.have.length(3); + expect(new Set(result)).to.deep.equal(new Set([leaf1, leaf2, leaf3])); + }); + }); + + // ===== executeTestsForProject Tests ===== + + suite('executeTestsForProject', () => { + test('should call executionAdapter.runTests with project URI and mapped test IDs', async () => { + // Mock + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + project.resultResolver.vsIdToRunId.set('test1', 'test_file.py::test1'); + const testItem = createMockTestItem('test1', '/workspace/proj/test.py'); + const runMock = createMockTestRun(); + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProject(project, [testItem], runMock.object, request, deps); + + // Assert + expect(project.executionAdapterStub.calledOnce).to.be.true; + const callArgs = project.executionAdapterStub.firstCall.args; + expect(callArgs[0].fsPath).to.equal(project.projectUri.fsPath); // uri + expect(callArgs[1]).to.deep.equal(['test_file.py::test1']); // testCaseIds + expect(callArgs[7]).to.equal(project); // project + }); + + test('should mark all leaf test items as started in the test run', async () => { + // Mock + const project = 
createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + project.resultResolver.vsIdToRunId.set('test1', 'runId1'); + project.resultResolver.vsIdToRunId.set('test2', 'runId2'); + const item1 = createMockTestItem('test1', '/workspace/proj/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj/test.py'); + const runMock = createMockTestRun(); + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProject(project, [item1, item2], runMock.object, request, deps); + + // Assert - both items marked as started + runMock.verify((r) => r.started(item1), typemoq.Times.once()); + runMock.verify((r) => r.started(item2), typemoq.Times.once()); + }); + + test('should resolve test IDs via resultResolver.vsIdToRunId mapping', async () => { + // Mock + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + project.resultResolver.vsIdToRunId.set('test1', 'path/to/test1'); + project.resultResolver.vsIdToRunId.set('test2', 'path/to/test2'); + const item1 = createMockTestItem('test1', '/workspace/proj/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj/test.py'); + const runMock = createMockTestRun(); + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProject(project, [item1, item2], runMock.object, request, deps); + + // Assert - use Set for order-agnostic comparison + const passedTestIds = project.executionAdapterStub.firstCall.args[1] as string[]; + expect(new Set(passedTestIds)).to.deep.equal(new Set(['path/to/test1', 'path/to/test2'])); + }); + + test('should skip execution when no items have vsIdToRunId mappings', async () => { + // Mock - no mappings set, so lookups return undefined + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + const item = createMockTestItem('unmapped_test', '/workspace/proj/test.py'); + const runMock = createMockTestRun(); + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProject(project, [item], runMock.object, request, deps); + + // Assert - execution adapter never called + expect(project.executionAdapterStub.called).to.be.false; + }); + + test('should recursively expand nested test items to find leaf nodes', async () => { + // Mock - class containing two test methods + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + const leaf1 = createMockTestItem('test1', '/workspace/proj/test.py'); + const leaf2 = createMockTestItem('test2', '/workspace/proj/test.py'); + const classItem = createMockTestItem('TestClass', '/workspace/proj/test.py', [leaf1, leaf2]); + project.resultResolver.vsIdToRunId.set('test1', 'runId1'); + project.resultResolver.vsIdToRunId.set('test2', 'runId2'); + const runMock = createMockTestRun(); + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProject(project, [classItem], runMock.object, request, deps); + + // Assert - leaf nodes marked as started, not the parent class + runMock.verify((r) => r.started(leaf1), typemoq.Times.once()); + runMock.verify((r) => r.started(leaf2), typemoq.Times.once()); + const passedTestIds = 
project.executionAdapterStub.firstCall.args[1] as string[]; + expect(passedTestIds).to.have.length(2); + }); + }); + + // ===== executeTestsForProjects Tests ===== + + suite('executeTestsForProjects', () => { + let telemetryStub: sinon.SinonStub; + + setup(() => { + telemetryStub = sandbox.stub(telemetry, 'sendTelemetryEvent'); + }); + + test('should return immediately when empty projects array provided', async () => { + // Mock + const runMock = createMockTestRun(); + const token = new CancellationTokenSource().token; + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProjects([], [], runMock.object, request, token, deps); + + // Assert - no telemetry sent since no projects executed + expect(telemetryStub.called).to.be.false; + }); + + test('should skip execution when cancellation requested before start', async () => { + // Mock + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + const item = createMockTestItem('test1', '/workspace/proj/test.py'); + const runMock = createMockTestRun(); + const tokenSource = new CancellationTokenSource(); + tokenSource.cancel(); // Pre-cancel + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProjects([project], [item], runMock.object, request, tokenSource.token, deps); + + // Assert - execution adapter never called + expect(project.executionAdapterStub.called).to.be.false; + }); + + test('should execute tests for each project when multiple projects provided', async () => { + // Mock + const proj1 = createMockProjectAdapter({ projectPath: '/workspace/proj1', projectName: 'proj1' }); + const proj2 = createMockProjectAdapter({ projectPath: '/workspace/proj2', projectName: 'proj2' }); + proj1.resultResolver.vsIdToRunId.set('test1', 'runId1'); + proj2.resultResolver.vsIdToRunId.set('test2', 'runId2'); + const item1 = createMockTestItem('test1', '/workspace/proj1/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj2/test.py'); + const runMock = createMockTestRun(); + const token = new CancellationTokenSource().token; + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProjects([proj1, proj2], [item1, item2], runMock.object, request, token, deps); + + // Assert - both projects had their execution adapters called + expect(proj1.executionAdapterStub.calledOnce).to.be.true; + expect(proj2.executionAdapterStub.calledOnce).to.be.true; + }); + + test('should emit telemetry event for each project execution', async () => { + // Mock + const proj1 = createMockProjectAdapter({ projectPath: '/workspace/proj1', projectName: 'proj1' }); + const proj2 = createMockProjectAdapter({ projectPath: '/workspace/proj2', projectName: 'proj2' }); + proj1.resultResolver.vsIdToRunId.set('test1', 'runId1'); + proj2.resultResolver.vsIdToRunId.set('test2', 'runId2'); + const item1 = createMockTestItem('test1', '/workspace/proj1/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj2/test.py'); + const runMock = createMockTestRun(); + const token = new CancellationTokenSource().token; + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProjects([proj1, proj2], [item1, item2], 
runMock.object, request, token, deps); + + // Assert - telemetry sent twice (once per project) + expect(telemetryStub.callCount).to.equal(2); + }); + + test('should stop processing remaining projects when cancellation requested mid-execution', async () => { + // Mock + const tokenSource = new CancellationTokenSource(); + const proj1 = createMockProjectAdapter({ projectPath: '/workspace/proj1', projectName: 'proj1' }); + const proj2 = createMockProjectAdapter({ projectPath: '/workspace/proj2', projectName: 'proj2' }); + // First project triggers cancellation during its execution + proj1.executionAdapterStub.callsFake(async () => { + tokenSource.cancel(); + }); + proj1.resultResolver.vsIdToRunId.set('test1', 'runId1'); + proj2.resultResolver.vsIdToRunId.set('test2', 'runId2'); + const item1 = createMockTestItem('test1', '/workspace/proj1/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj2/test.py'); + const runMock = createMockTestRun(); + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProjects( + [proj1, proj2], + [item1, item2], + runMock.object, + request, + tokenSource.token, + deps, + ); + + // Assert - first project executed, second may be skipped due to cancellation check + expect(proj1.executionAdapterStub.calledOnce).to.be.true; + }); + + test('should continue executing remaining projects when one project fails', async () => { + // Mock + const proj1 = createMockProjectAdapter({ projectPath: '/workspace/proj1', projectName: 'proj1' }); + const proj2 = createMockProjectAdapter({ projectPath: '/workspace/proj2', projectName: 'proj2' }); + proj1.executionAdapterStub.rejects(new Error('Execution failed')); + proj1.resultResolver.vsIdToRunId.set('test1', 'runId1'); + proj2.resultResolver.vsIdToRunId.set('test2', 'runId2'); + const item1 = createMockTestItem('test1', '/workspace/proj1/test.py'); + const item2 = createMockTestItem('test2', '/workspace/proj2/test.py'); + const runMock = createMockTestRun(); + const token = new CancellationTokenSource().token; + const request = { profile: { kind: TestRunProfileKind.Run } } as TestRunRequest; + const deps = createMockDependencies(); + + // Run - should not throw + await executeTestsForProjects([proj1, proj2], [item1, item2], runMock.object, request, token, deps); + + // Assert - second project still executed despite first failing + expect(proj2.executionAdapterStub.calledOnce).to.be.true; + }); + + test('should configure loadDetailedCoverage callback when run profile is Coverage', async () => { + // Mock + const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' }); + project.resultResolver.vsIdToRunId.set('test1', 'runId1'); + const item = createMockTestItem('test1', '/workspace/proj/test.py'); + const runMock = createMockTestRun(); + const token = new CancellationTokenSource().token; + const profileMock = ({ + kind: TestRunProfileKind.Coverage, + loadDetailedCoverage: undefined, + } as unknown) as TestRunProfile; + const request = { profile: profileMock } as TestRunRequest; + const deps = createMockDependencies(); + + // Run + await executeTestsForProjects([project], [item], runMock.object, request, token, deps); + + // Assert - loadDetailedCoverage callback was configured + expect(profileMock.loadDetailedCoverage).to.not.be.undefined; + }); + + test('should include debugging=true in telemetry when run profile is Debug', async () => { + // Mock + const project = 
+            project.resultResolver.vsIdToRunId.set('test1', 'runId1');
+            const item = createMockTestItem('test1', '/workspace/proj/test.py');
+            const runMock = createMockTestRun();
+            const token = new CancellationTokenSource().token;
+            const request = { profile: { kind: TestRunProfileKind.Debug } } as TestRunRequest;
+            const deps = createMockDependencies();
+
+            // Run
+            await executeTestsForProjects([project], [item], runMock.object, request, token, deps);
+
+            // Assert - telemetry contains debugging=true
+            expect(telemetryStub.calledOnce).to.be.true;
+            const telemetryProps = telemetryStub.firstCall.args[2];
+            expect(telemetryProps.debugging).to.be.true;
+        });
+    });
+
+    // ===== setupCoverageForProject Tests =====
+
+    suite('setupCoverageForProject', () => {
+        test('should configure loadDetailedCoverage callback when profile kind is Coverage', () => {
+            // Mock
+            const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' });
+            const profileMock = ({
+                kind: TestRunProfileKind.Coverage,
+                loadDetailedCoverage: undefined,
+            } as unknown) as TestRunProfile;
+            const request = { profile: profileMock } as TestRunRequest;
+
+            // Run
+            setupCoverageForProject(request, project);
+
+            // Assert
+            expect(profileMock.loadDetailedCoverage).to.be.a('function');
+        });
+
+        test('should leave loadDetailedCoverage undefined when profile kind is Run', () => {
+            // Mock
+            const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' });
+            const profileMock = ({
+                kind: TestRunProfileKind.Run,
+                loadDetailedCoverage: undefined,
+            } as unknown) as TestRunProfile;
+            const request = { profile: profileMock } as TestRunRequest;
+
+            // Run
+            setupCoverageForProject(request, project);
+
+            // Assert
+            expect(profileMock.loadDetailedCoverage).to.be.undefined;
+        });
+
+        test('should return coverage data from detailedCoverageMap when loadDetailedCoverage is called', async () => {
+            // Mock
+            const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' });
+            const mockCoverageDetails = [{ line: 1, executed: true }];
+            // Use Uri.fsPath as the key to match the implementation's lookup
+            const fileUri = Uri.file('/workspace/proj/file.py');
+            project.resultResolver.detailedCoverageMap.set(fileUri.fsPath, mockCoverageDetails as any);
+            const profileMock = ({
+                kind: TestRunProfileKind.Coverage,
+                loadDetailedCoverage: undefined,
+            } as unknown) as TestRunProfile;
+            const request = { profile: profileMock } as TestRunRequest;
+
+            // Run - configure coverage
+            setupCoverageForProject(request, project);
+
+            // Run - call the configured callback
+            const fileCoverage = { uri: fileUri };
+            const result = await profileMock.loadDetailedCoverage!(
+                {} as TestRun,
+                fileCoverage as any,
+                {} as CancellationToken,
+            );
+
+            // Assert
+            expect(result).to.deep.equal(mockCoverageDetails);
+        });
+
+        test('should return empty array when file has no coverage data in map', async () => {
+            // Mock
+            const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' });
+            const profileMock = ({
+                kind: TestRunProfileKind.Coverage,
+                loadDetailedCoverage: undefined,
+            } as unknown) as TestRunProfile;
+            const request = { profile: profileMock } as TestRunRequest;
+
+            // Run - configure coverage
+            setupCoverageForProject(request, project);
+
+            // Run - call callback for file not in map
+            const fileCoverage = { uri: Uri.file('/workspace/proj/uncovered_file.py') };
+            const result = await profileMock.loadDetailedCoverage!(
+                {} as TestRun,
+                fileCoverage as any,
+                {} as CancellationToken,
+            );
+
+            // Assert
+            expect(result).to.deep.equal([]);
+        });
+    });
+});
diff --git a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts
index e0401edc7b41..40c701b22641 100644
--- a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts
+++ b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts
@@ -22,6 +22,7 @@ import { EXTENSION_ROOT_DIR } from '../../../../client/constants';
 import { MockChildProcess } from '../../../mocks/mockChildProcess';
 import { traceInfo } from '../../../../client/logging';
 import * as extapi from '../../../../client/envExt/api.internal';
+import { createMockProjectAdapter } from '../testMocks';
 
 suite('pytest test execution adapter', () => {
     let useEnvExtensionStub: sinon.SinonStub;
@@ -325,4 +326,210 @@
             typeMoq.Times.once(),
         );
     });
+
+    // ===== PROJECT-BASED EXECUTION TESTS =====
+
+    suite('project-based execution', () => {
+        test('should set PROJECT_ROOT_PATH env var when project provided', async () => {
+            const deferred2 = createDeferred();
+            const deferred3 = createDeferred();
+            execFactory = typeMoq.Mock.ofType<IPythonExecutionFactory>();
+            execFactory
+                .setup((x) => x.createActivatedEnvironment(typeMoq.It.isAny()))
+                .returns(() => {
+                    deferred2.resolve();
+                    return Promise.resolve(execService.object);
+                });
+            utilsWriteTestIdsFileStub.callsFake(() => {
+                deferred3.resolve();
+                return Promise.resolve('testIdPipe-mockName');
+            });
+            const testRun = typeMoq.Mock.ofType<TestRun>();
+            testRun.setup((t) => t.token).returns(() => ({ onCancellationRequested: () => undefined } as any));
+
+            const projectPath = path.join('/', 'workspace', 'myproject');
+            const mockProject = createMockProjectAdapter({
+                projectPath,
+                projectName: 'myproject',
+                pythonPath: '/custom/python/path',
+            });
+
+            const uri = Uri.file(myTestPath);
+            adapter = new PytestTestExecutionAdapter(configService);
+            adapter.runTests(
+                uri,
+                [],
+                TestRunProfileKind.Run,
+                testRun.object,
+                execFactory.object,
+                undefined,
+                undefined,
+                mockProject,
+            );
+
+            await deferred2.promise;
+            await deferred3.promise;
+            await deferred4.promise;
+            mockProc.trigger('close');
+
+            execService.verify(
+                (x) =>
+                    x.execObservable(
+                        typeMoq.It.isAny(),
+                        typeMoq.It.is((options) => {
+                            assert.equal(options.env?.PROJECT_ROOT_PATH, projectPath);
+                            return true;
+                        }),
+                    ),
+                typeMoq.Times.once(),
+            );
+        });
+
+        test('should pass debugSessionName in LaunchOptions for debug mode with project', async () => {
+            const deferred3 = createDeferred();
+            utilsWriteTestIdsFileStub.callsFake(() => Promise.resolve('testIdPipe-mockName'));
+
+            debugLauncher
+                .setup((dl) => dl.launchDebugger(typeMoq.It.isAny(), typeMoq.It.isAny(), typeMoq.It.isAny()))
+                .returns(async (_opts, callback) => {
+                    traceInfo('stubs launch debugger');
+                    if (typeof callback === 'function') {
+                        deferred3.resolve();
+                        callback();
+                    }
+                });
+
+            const testRun = typeMoq.Mock.ofType<TestRun>();
+            testRun
+                .setup((t) => t.token)
+                .returns(
+                    () =>
+                        ({
+                            onCancellationRequested: () => undefined,
+                        } as any),
+                );
+
+            const projectPath = path.join('/', 'workspace', 'myproject');
+            const mockProject = createMockProjectAdapter({
+                projectPath,
+                projectName: 'myproject (Python 3.11)',
+                pythonPath: '/custom/python/path',
+            });
+
+            const uri = Uri.file(myTestPath);
+            adapter = new PytestTestExecutionAdapter(configService);
+            adapter.runTests(
+                uri,
+                [],
+                TestRunProfileKind.Debug,
+                testRun.object,
+                execFactory.object,
+                debugLauncher.object,
+                undefined,
+                mockProject,
+            );
+
+            await deferred3.promise;
+
+            debugLauncher.verify(
+                (x) =>
+                    x.launchDebugger(
+                        typeMoq.It.is((launchOptions) => {
+                            // Project should be passed for project-based debugging
+                            assert.ok(launchOptions.project, 'project should be defined');
+                            assert.equal(launchOptions.project?.name, 'myproject (Python 3.11)');
+                            assert.equal(launchOptions.project?.uri.fsPath, projectPath);
+                            return true;
+                        }),
+                        typeMoq.It.isAny(),
+                        typeMoq.It.isAny(),
+                    ),
+                typeMoq.Times.once(),
+            );
+        });
+
+        test('should not set PROJECT_ROOT_PATH when no project provided', async () => {
+            const deferred2 = createDeferred();
+            const deferred3 = createDeferred();
+            execFactory = typeMoq.Mock.ofType<IPythonExecutionFactory>();
+            execFactory
+                .setup((x) => x.createActivatedEnvironment(typeMoq.It.isAny()))
+                .returns(() => {
+                    deferred2.resolve();
+                    return Promise.resolve(execService.object);
+                });
+            utilsWriteTestIdsFileStub.callsFake(() => {
+                deferred3.resolve();
+                return Promise.resolve('testIdPipe-mockName');
+            });
+            const testRun = typeMoq.Mock.ofType<TestRun>();
+            testRun.setup((t) => t.token).returns(() => ({ onCancellationRequested: () => undefined } as any));
+
+            const uri = Uri.file(myTestPath);
+            adapter = new PytestTestExecutionAdapter(configService);
+            // Call without project parameter
+            adapter.runTests(uri, [], TestRunProfileKind.Run, testRun.object, execFactory.object);
+
+            await deferred2.promise;
+            await deferred3.promise;
+            await deferred4.promise;
+            mockProc.trigger('close');
+
+            execService.verify(
+                (x) =>
+                    x.execObservable(
+                        typeMoq.It.isAny(),
+                        typeMoq.It.is((options) => {
+                            assert.equal(options.env?.PROJECT_ROOT_PATH, undefined);
+                            return true;
+                        }),
+                    ),
+                typeMoq.Times.once(),
+            );
+        });
+
+        test('should not set project in LaunchOptions when no project provided', async () => {
+            const deferred3 = createDeferred();
+            utilsWriteTestIdsFileStub.callsFake(() => Promise.resolve('testIdPipe-mockName'));
+
+            debugLauncher
+                .setup((dl) => dl.launchDebugger(typeMoq.It.isAny(), typeMoq.It.isAny(), typeMoq.It.isAny()))
+                .returns(async (_opts, callback) => {
+                    if (typeof callback === 'function') {
+                        deferred3.resolve();
+                        callback();
+                    }
+                });
+
+            const testRun = typeMoq.Mock.ofType<TestRun>();
+            testRun.setup((t) => t.token).returns(() => ({ onCancellationRequested: () => undefined } as any));
+
+            const uri = Uri.file(myTestPath);
+            adapter = new PytestTestExecutionAdapter(configService);
+            // Call without project parameter
+            adapter.runTests(
+                uri,
+                [],
+                TestRunProfileKind.Debug,
+                testRun.object,
+                execFactory.object,
+                debugLauncher.object,
+            );
+
+            await deferred3.promise;
+
+            debugLauncher.verify(
+                (x) =>
+                    x.launchDebugger(
+                        typeMoq.It.is((launchOptions) => {
+                            assert.equal(launchOptions.project, undefined);
+                            return true;
+                        }),
+                        typeMoq.It.isAny(),
+                        typeMoq.It.isAny(),
+                    ),
+                typeMoq.Times.once(),
+            );
+        });
+    });
+});
diff --git a/src/test/testing/testController/testMocks.ts b/src/test/testing/testController/testMocks.ts
new file mode 100644
index 000000000000..eb37d492f1d9
--- /dev/null
+++ b/src/test/testing/testController/testMocks.ts
@@ -0,0 +1,152 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/**
+ * Centralized mock utilities for testing testController components.
+ * Re-use these helpers across multiple test files for consistency.
+ */
+
+import * as sinon from 'sinon';
+import * as typemoq from 'typemoq';
+import { TestItem, TestItemCollection, TestRun, Uri } from 'vscode';
+import { IPythonExecutionFactory } from '../../../client/common/process/types';
+import { ITestDebugLauncher } from '../../../client/testing/common/types';
+import { ProjectAdapter } from '../../../client/testing/testController/common/projectAdapter';
+import { ProjectExecutionDependencies } from '../../../client/testing/testController/common/projectTestExecution';
+import { TestProjectRegistry } from '../../../client/testing/testController/common/testProjectRegistry';
+import { ITestExecutionAdapter, ITestResultResolver } from '../../../client/testing/testController/common/types';
+
+/**
+ * Creates a mock TestItem with configurable properties.
+ * @param id - The unique ID of the test item
+ * @param uriPath - The file path for the test item's URI
+ * @param children - Optional array of child test items
+ */
+export function createMockTestItem(id: string, uriPath: string, children?: TestItem[]): TestItem {
+    const childMap = new Map();
+    children?.forEach((c) => childMap.set(c.id, c));
+
+    const mockChildren: TestItemCollection = {
+        size: childMap.size,
+        forEach: (callback: (item: TestItem, collection: TestItemCollection) => void) => {
+            childMap.forEach((item) => callback(item, mockChildren));
+        },
+        get: (itemId: string) => childMap.get(itemId),
+        add: () => {},
+        delete: () => {},
+        replace: () => {},
+        [Symbol.iterator]: function* () {
+            for (const [key, value] of childMap) {
+                yield [key, value] as [string, TestItem];
+            }
+        },
+    } as TestItemCollection;
+
+    return ({
+        id,
+        uri: Uri.file(uriPath),
+        children: mockChildren,
+        label: id,
+        canResolveChildren: false,
+        busy: false,
+        tags: [],
+        range: undefined,
+        error: undefined,
+        parent: undefined,
+    } as unknown) as TestItem;
+}
+
+/**
+ * Creates a mock TestItem without a URI.
+ * Useful for testing edge cases where test items have no associated file.
+ * @param id - The unique ID of the test item
+ */
+export function createMockTestItemWithoutUri(id: string): TestItem {
+    return ({
+        id,
+        uri: undefined,
+        children: ({ size: 0, forEach: () => {} } as unknown) as TestItemCollection,
+        label: id,
+    } as unknown) as TestItem;
+}
+
+export interface MockProjectAdapterConfig {
+    projectPath: string;
+    projectName: string;
+    pythonPath?: string;
+    testProvider?: 'pytest' | 'unittest';
+}
+
+export type MockProjectAdapter = ProjectAdapter & { executionAdapterStub: sinon.SinonStub };
+
+/**
+ * Creates a mock ProjectAdapter for testing project-based test execution.
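+ *
+ * Illustrative usage (mirrors the executeTestsForProjects unit tests in this change; `item`, `run`,
+ * `request`, `token`, and `deps` are assumed to come from the surrounding test):
+ *   const project = createMockProjectAdapter({ projectPath: '/workspace/proj', projectName: 'proj' });
+ *   project.resultResolver.vsIdToRunId.set('test1', 'runId1');
+ *   await executeTestsForProjects([project], [item], run, request, token, deps);
+ *   expect(project.executionAdapterStub.calledOnce).to.be.true;
+ *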
+ * @param config - Configuration object with project details
+ * @returns A mock ProjectAdapter with an exposed executionAdapterStub for verification
+ */
+export function createMockProjectAdapter(config: MockProjectAdapterConfig): MockProjectAdapter {
+    const runTestsStub = sinon.stub().resolves();
+    const executionAdapter: ITestExecutionAdapter = ({
+        runTests: runTestsStub,
+    } as unknown) as ITestExecutionAdapter;
+
+    const resultResolverMock: ITestResultResolver = ({
+        vsIdToRunId: new Map(),
+        runIdToVSid: new Map(),
+        runIdToTestItem: new Map(),
+        detailedCoverageMap: new Map(),
+        resolveDiscovery: () => Promise.resolve(),
+        resolveExecution: () => {},
+    } as unknown) as ITestResultResolver;
+
+    const adapter = ({
+        projectUri: Uri.file(config.projectPath),
+        projectName: config.projectName,
+        workspaceUri: Uri.file(config.projectPath),
+        testProvider: config.testProvider ?? 'pytest',
+        pythonEnvironment: config.pythonPath
+            ? {
+                  execInfo: { run: { executable: config.pythonPath } },
+              }
+            : undefined,
+        pythonProject: {
+            name: config.projectName,
+            uri: Uri.file(config.projectPath),
+        },
+        executionAdapter,
+        discoveryAdapter: {} as any,
+        resultResolver: resultResolverMock,
+        isDiscovering: false,
+        isExecuting: false,
+        // Expose the stub for testing
+        executionAdapterStub: runTestsStub,
+    } as unknown) as MockProjectAdapter;
+
+    return adapter;
+}
+
+/**
+ * Creates mock dependencies for project test execution.
+ * @returns An object containing mocked ProjectExecutionDependencies
+ */
+export function createMockDependencies(): ProjectExecutionDependencies {
+    return {
+        projectRegistry: typemoq.Mock.ofType<TestProjectRegistry>().object,
+        pythonExecFactory: typemoq.Mock.ofType<IPythonExecutionFactory>().object,
+        debugLauncher: typemoq.Mock.ofType<ITestDebugLauncher>().object,
+    };
+}
+
+/**
+ * Creates a mock TestRun with common setup methods.
+ * @returns A TypeMoq mock of TestRun
+ */
+export function createMockTestRun(): typemoq.IMock<TestRun> {
+    const runMock = typemoq.Mock.ofType<TestRun>();
+    runMock.setup((r) => r.started(typemoq.It.isAny()));
+    runMock.setup((r) => r.passed(typemoq.It.isAny(), typemoq.It.isAny()));
+    runMock.setup((r) => r.failed(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny()));
+    runMock.setup((r) => r.skipped(typemoq.It.isAny()));
+    runMock.setup((r) => r.end());
+    return runMock;
+}