From c77994b63e551184eb5f220cf2e09d5a666528b8 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 2 Oct 2024 11:30:10 -0700 Subject: [PATCH 01/18] progress --- noxfile.py | 1 + python_files/testing_tools/socket_manager.py | 54 ++--- python_files/tests/pytestadapter/helpers.py | 38 +++- python_files/vscode_pytest/__init__.py | 30 ++- python_files/vscode_pytest/_common.py | 2 + requirements.in | 1 + src/client/common/pipes/namedPipes.ts | 206 ++++++++++++------ .../testing/testController/common/utils.ts | 159 +++++++------- 8 files changed, 303 insertions(+), 188 deletions(-) create mode 100644 python_files/vscode_pytest/_common.py diff --git a/noxfile.py b/noxfile.py index 60e22d461074..3991ee8c025a 100644 --- a/noxfile.py +++ b/noxfile.py @@ -53,6 +53,7 @@ def install_python_libs(session: nox.Session): ) session.install("packaging") + session.install("debugpy") # Download get-pip script session.run( diff --git a/python_files/testing_tools/socket_manager.py b/python_files/testing_tools/socket_manager.py index 347453a6ca1a..b47e39a743fc 100644 --- a/python_files/testing_tools/socket_manager.py +++ b/python_files/testing_tools/socket_manager.py @@ -20,39 +20,26 @@ def __exit__(self, *_): self.close() def connect(self): - if sys.platform == "win32": - self._writer = open(self.name, "w", encoding="utf-8") # noqa: SIM115, PTH123 - # reader created in read method - else: - self._socket = _SOCKET(socket.AF_UNIX, socket.SOCK_STREAM) - self._socket.connect(self.name) + self._writer = open(self.name, "w", encoding="utf-8") # noqa: SIM115, PTH123 + # reader created in read method return self def close(self): - if sys.platform == "win32": - self._writer.close() - else: - # add exception catch - self._socket.close() + self._writer.close() + self._reader.close() def write(self, data: str): - if sys.platform == "win32": - try: - # for windows, is should only use \n\n - request = ( - f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" - ) - self._writer.write(request) - self._writer.flush() - except Exception as e: - print("error attempting to write to pipe", e) - raise (e) - else: - # must include the carriage-return defined (as \r\n) for unix systems + + try: + # for windows, is should only use \n\n request = ( - f"""content-length: {len(data)}\r\ncontent-type: application/json\r\n\r\n{data}""" + f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" ) - self._socket.send(request.encode("utf-8")) + self._writer.write(request) + self._writer.flush() + except Exception as e: + print("error attempting to write to pipe", e) + raise (e) def read(self, bufsize=1024) -> str: """Read data from the socket. @@ -63,17 +50,10 @@ def read(self, bufsize=1024) -> str: Returns: data (str): Data received from the socket. 
""" - if sys.platform == "win32": - # returns a string automatically from read - if not hasattr(self, "_reader"): - self._reader = open(self.name, encoding="utf-8") # noqa: SIM115, PTH123 - return self._reader.read(bufsize) - else: - # receive bytes and convert to string - while True: - part: bytes = self._socket.recv(bufsize) - data: str = part.decode("utf-8") - return data + # returns a string automatically from read + if not hasattr(self, "_reader"): + self._reader = open(self.name, encoding="utf-8") # noqa: SIM115, PTH123 + return self._reader.read(bufsize) class SocketManager: diff --git a/python_files/tests/pytestadapter/helpers.py b/python_files/tests/pytestadapter/helpers.py index 7972eedd0919..28e17764d986 100644 --- a/python_files/tests/pytestadapter/helpers.py +++ b/python_files/tests/pytestadapter/helpers.py @@ -128,6 +128,21 @@ def parse_rpc_message(data: str) -> Tuple[Dict[str, str], str]: print("json decode error") +def _listen_on_fifo(pipe_name: str, result: List[str], completed: threading.Event): + # Open the FIFO for reading + with open(pipe_name) as fifo: + print("Waiting for data...") + while True: + if completed.is_set(): + break # Exit loop if completed event is set + data = fifo.read() # This will block until data is available + if len(data) == 0: + # If data is empty, assume EOF + break + print(f"Received: {data}") + result.append(data) + + def _listen_on_pipe_new(listener, result: List[str], completed: threading.Event): """Listen on the named pipe or Unix domain socket for JSON data from the server. @@ -307,14 +322,19 @@ def runner_with_cwd_env( # if additional environment variables are passed, add them to the environment if env_add: env.update(env_add) - server = UnixPipeServer(pipe_name) - server.start() + # server = UnixPipeServer(pipe_name) + # server.start() + ################# + # Create the FIFO (named pipe) if it doesn't exist + # if not pathlib.Path.exists(pipe_name): + os.mkfifo(pipe_name) + ################# completed = threading.Event() result = [] # result is a string array to store the data during threading t1: threading.Thread = threading.Thread( - target=_listen_on_pipe_new, args=(server, result, completed) + target=_listen_on_fifo, args=(pipe_name, result, completed) ) t1.start() @@ -364,14 +384,20 @@ def generate_random_pipe_name(prefix=""): # For Windows, named pipes have a specific naming convention. if sys.platform == "win32": - return f"\\\\.\\pipe\\{prefix}-{random_suffix}-sock" + return f"\\\\.\\pipe\\{prefix}-{random_suffix}" # For Unix-like systems, use either the XDG_RUNTIME_DIR or a temporary directory. 
xdg_runtime_dir = os.getenv("XDG_RUNTIME_DIR") if xdg_runtime_dir: - return os.path.join(xdg_runtime_dir, f"{prefix}-{random_suffix}.sock") # noqa: PTH118 + return os.path.join(xdg_runtime_dir, f"{prefix}-{random_suffix}") # noqa: PTH118 else: - return os.path.join(tempfile.gettempdir(), f"{prefix}-{random_suffix}.sock") # noqa: PTH118 + return os.path.join(tempfile.gettempdir(), f"{prefix}-{random_suffix}") # noqa: PTH118 + + +async def create_fifo(pipe_name: str) -> None: + # Create the FIFO (named pipe) if it doesn't exist + if not pathlib.Path.exists(pipe_name): + os.mkfifo(pipe_name) class UnixPipeServer: diff --git a/python_files/vscode_pytest/__init__.py b/python_files/vscode_pytest/__init__.py index ca06bf174418..04eabbe8e7bb 100644 --- a/python_files/vscode_pytest/__init__.py +++ b/python_files/vscode_pytest/__init__.py @@ -20,10 +20,16 @@ import pytest -script_dir = pathlib.Path(__file__).parent.parent -sys.path.append(os.fspath(script_dir)) -sys.path.append(os.fspath(script_dir / "lib" / "python")) -from testing_tools import socket_manager # noqa: E402 + +# sys.path.append("/Users/eleanorboyd/vscode-python/.nox/install_python_libs/lib/python3.10") +# sys.path.append("/Users/eleanorboyd/vscode-python-debugger") +# sys.path.append("/Users/eleanorboyd/vscode-python-debugger/bundled") +# sys.path.append("/Users/eleanorboyd/vscode-python-debugger/bundled/libs") + +# import debugpy # noqa: E402 + +# debugpy.connect(5678) +# debugpy.breakpoint() # noqa: E702 if TYPE_CHECKING: from pluggy import Result @@ -872,6 +878,7 @@ def send_post_request( payload -- the payload data to be sent. cls_encoder -- a custom encoder if needed. """ + print("EJFB into send post request!") if not TEST_RUN_PIPE: error_msg = ( "PYTEST ERROR: TEST_RUN_PIPE is not set at the time of pytest starting. 
" @@ -886,9 +893,10 @@ def send_post_request( if __writer is None: try: - __writer = socket_manager.PipeManager(TEST_RUN_PIPE) - __writer.connect() + print("EJFB attemping writer open") + __writer = open(TEST_RUN_PIPE, "w", encoding="utf-8", newline="\r\n") # noqa: SIM115, PTH123 except Exception as error: + print("EJFB error in writer open") error_msg = f"Error attempting to connect to extension named pipe {TEST_RUN_PIPE}[vscode-pytest]: {error}" print(error_msg, file=sys.stderr) print( @@ -905,10 +913,16 @@ def send_post_request( "params": payload, } data = json.dumps(rpc, cls=cls_encoder) - + print(f"EJFB Plugin info[vscode-pytest]: sending data: \n{data}\n") try: if __writer: - __writer.write(data) + request = f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" + __writer.write(request) + __writer.flush() + print( + f"EJFB Plugin info[vscode-pytest]: data sent successfully[vscode-pytest]: \n{data}\n" + ) + # __writer.close() else: print( f"Plugin error connection error[vscode-pytest], writer is None \n[vscode-pytest] data: \n{data} \n", diff --git a/python_files/vscode_pytest/_common.py b/python_files/vscode_pytest/_common.py new file mode 100644 index 000000000000..9f835f555b6e --- /dev/null +++ b/python_files/vscode_pytest/_common.py @@ -0,0 +1,2 @@ +# def send_post_request(): +# return diff --git a/requirements.in b/requirements.in index 9a490ea1b599..ad456c31cd4c 100644 --- a/requirements.in +++ b/requirements.in @@ -13,3 +13,4 @@ microvenv importlib_metadata packaging tomli +debugpy diff --git a/src/client/common/pipes/namedPipes.ts b/src/client/common/pipes/namedPipes.ts index c6010d491822..099d73ba9010 100644 --- a/src/client/common/pipes/namedPipes.ts +++ b/src/client/common/pipes/namedPipes.ts @@ -1,67 +1,17 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+import * as cp from 'child_process'; import * as crypto from 'crypto'; +import * as fs from 'fs'; import * as net from 'net'; import * as os from 'os'; import * as path from 'path'; import * as rpc from 'vscode-jsonrpc/node'; +import { CancellationError, CancellationToken } from 'vscode'; import { traceVerbose } from '../../logging'; - -export interface ConnectedServerObj { - serverOnClosePromise(): Promise; -} - -export function createNamedPipeServer( - pipeName: string, - onConnectionCallback: (value: [rpc.MessageReader, rpc.MessageWriter]) => void, -): Promise { - traceVerbose(`Creating named pipe server on ${pipeName}`); - - let connectionCount = 0; - return new Promise((resolve, reject) => { - // create a server, resolves and returns server on listen - const server = net.createServer((socket) => { - // this lambda function is called whenever a client connects to the server - connectionCount += 1; - traceVerbose('new client is connected to the socket, connectionCount: ', connectionCount, pipeName); - socket.on('close', () => { - // close event is emitted by client to the server - connectionCount -= 1; - traceVerbose('client emitted close event, connectionCount: ', connectionCount); - if (connectionCount <= 0) { - // if all clients are closed, close the server - traceVerbose('connection count is <= 0, closing the server: ', pipeName); - server.close(); - } - }); - - // upon connection create a reader and writer and pass it to the callback - onConnectionCallback([ - new rpc.SocketMessageReader(socket, 'utf-8'), - new rpc.SocketMessageWriter(socket, 'utf-8'), - ]); - }); - const closedServerPromise = new Promise((resolveOnServerClose) => { - // get executed on connection close and resolves - // implementation of the promise is the arrow function - server.on('close', resolveOnServerClose); - }); - server.on('error', reject); - - server.listen(pipeName, () => { - // this function is called when the server is listening - server.removeListener('error', reject); - const connectedServer = { - // when onClosed event is called, so is closed function - // goes backwards up the chain, when resolve2 is called, so is onClosed that means server.onClosed() on the other end can work - // event C - serverOnClosePromise: () => closedServerPromise, - }; - resolve(connectedServer); - }); - }); -} +import { isWindows } from '../platform/platformService'; +import { createDeferred } from '../utils/async'; const { XDG_RUNTIME_DIR } = process.env; export function generateRandomPipeName(prefix: string): string { @@ -72,20 +22,154 @@ export function generateRandomPipeName(prefix: string): string { } if (process.platform === 'win32') { - return `\\\\.\\pipe\\${prefix}-${randomSuffix}-sock`; + return `\\\\.\\pipe\\${prefix}-${randomSuffix}`; } let result; if (XDG_RUNTIME_DIR) { - result = path.join(XDG_RUNTIME_DIR, `${prefix}-${randomSuffix}.sock`); + result = path.join(XDG_RUNTIME_DIR, `${prefix}-${randomSuffix}`); } else { - result = path.join(os.tmpdir(), `${prefix}-${randomSuffix}.sock`); + result = path.join(os.tmpdir(), `${prefix}-${randomSuffix}`); } return result; } -export function namedPipeClient(name: string): [rpc.MessageReader, rpc.MessageWriter] { - const socket = net.connect(name); - return [new rpc.SocketMessageReader(socket, 'utf-8'), new rpc.SocketMessageWriter(socket, 'utf-8')]; +async function mkfifo(fifoPath: string): Promise { + return new Promise((resolve, reject) => { + const proc = cp.spawn('mkfifo', [fifoPath]); + proc.on('error', (err) => { + reject(err); + }); + proc.on('exit', (code) => 
{ + if (code === 0) { + resolve(); + } + }); + }); +} + +export async function createWriterPipe(pipeName: string, token?: CancellationToken): Promise { + // windows implementation of FIFO using named pipes + if (isWindows()) { + const deferred = createDeferred(); + const server = net.createServer((socket) => { + traceVerbose(`Pipe connected: ${pipeName}`); + server.close(); + deferred.resolve(new rpc.SocketMessageWriter(socket, 'utf-8')); + }); + + server.on('error', deferred.reject); + server.listen(pipeName); + if (token) { + token.onCancellationRequested(() => { + if (server.listening) { + server.close(); + } + deferred.reject(new CancellationError()); + }); + } + return deferred.promise; + } + // linux implementation of FIFO + await mkfifo(pipeName); + const writer = fs.createWriteStream(pipeName, { + encoding: 'utf-8', + }); + return new rpc.StreamMessageWriter(writer, 'utf-8'); +} + +class CombinedReader implements rpc.MessageReader { + private _onError = new rpc.Emitter(); + + private _onClose = new rpc.Emitter(); + + private _onPartialMessage = new rpc.Emitter(); + + private _listeners = new rpc.Emitter(); + + private _readers: rpc.MessageReader[] = []; + + private _disposables: rpc.Disposable[] = []; + + constructor() { + this._disposables.push(this._onClose, this._onError, this._onPartialMessage, this._listeners); + } + + onError: rpc.Event = this._onError.event; + + onClose: rpc.Event = this._onClose.event; + + onPartialMessage: rpc.Event = this._onPartialMessage.event; + + listen(callback: rpc.DataCallback): rpc.Disposable { + return this._listeners.event(callback); + } + + add(reader: rpc.MessageReader): void { + this._readers.push(reader); + this._disposables.push( + reader.onError((error) => this._onError.fire(error)), + reader.onClose(() => this.dispose()), + reader.onPartialMessage((info) => this._onPartialMessage.fire(info)), + reader.listen((msg) => { + this._listeners.fire(msg as rpc.NotificationMessage); + }), + ); + } + + error(error: Error): void { + this._onError.fire(error); + } + + dispose(): void { + this._onClose.fire(); + this._disposables.forEach((disposable) => { + try { + disposable.dispose(); + } catch (e) { + /* noop */ + } + }); + } +} + +export async function createReaderPipe(pipeName: string, token?: CancellationToken): Promise { + if (isWindows()) { + // windows implementation of FIFO using named pipes + const deferred = createDeferred(); + const combined = new CombinedReader(); + + let refs = 0; + const server = net.createServer((socket) => { + traceVerbose(`Pipe connected: ${pipeName}`); + refs += 1; + + socket.on('close', () => { + refs -= 1; + if (refs <= 0) { + server.close(); + } + }); + combined.add(new rpc.SocketMessageReader(socket, 'utf-8')); + }); + server.on('error', deferred.reject); + server.listen(pipeName); + if (token) { + token.onCancellationRequested(() => { + if (server.listening) { + server.close(); + } + deferred.reject(new CancellationError()); + }); + } + deferred.resolve(combined); + return deferred.promise; + } + // linux implementation of FIFO + await mkfifo(pipeName); + const reader = fs.createReadStream(pipeName, { + encoding: 'utf-8', + }); + return new rpc.StreamMessageReader(reader, 'utf-8'); } diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index d386d953b933..bf454ca22508 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -20,7 +20,7 @@ import { ITestResultResolver, } from './types'; 
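// [editor's note] Every payload crossing these pipes is framed the way the Python side
// writes it: a content-length header, a content-type header, a blank line, then the
// JSON body — the framing vscode-jsonrpc's readers parse. A sketch under those
// assumptions (frameMessage is an illustrative name, not part of this patch):
function frameMessage(payload: unknown): string {
    const body = JSON.stringify({ jsonrpc: '2.0', params: payload });
    // content-length counts bytes, so use Buffer.byteLength rather than the
    // UTF-16 string length; non-ASCII payloads then frame correctly.
    const length = Buffer.byteLength(body, 'utf-8');
    return `content-length: ${length}\ncontent-type: application/json\n\n${body}`;
}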
import { Deferred, createDeferred } from '../../../common/utils/async'; -import { createNamedPipeServer, generateRandomPipeName } from '../../../common/pipes/namedPipes'; +import { createReaderPipe, generateRandomPipeName } from '../../../common/pipes/namedPipes'; import { EXTENSION_ROOT_DIR } from '../../../constants'; export function fixLogLines(content: string): string { @@ -169,27 +169,27 @@ export function pythonTestAdapterRewriteEnabled(serviceContainer: IServiceContai return experiment.inExperimentSync(EnableTestAdapterRewrite.experiment); } -export async function startTestIdsNamedPipe(testIds: string[]): Promise { - const pipeName: string = generateRandomPipeName('python-test-ids'); - // uses callback so the on connect action occurs after the pipe is created - await createNamedPipeServer(pipeName, ([_reader, writer]) => { - traceVerbose('Test Ids named pipe connected'); - // const num = await - const msg = { - jsonrpc: '2.0', - params: testIds, - } as Message; - writer - .write(msg) - .then(() => { - writer.end(); - }) - .catch((ex) => { - traceError('Failed to write test ids to named pipe', ex); - }); - }); - return pipeName; -} +// export async function startTestIdsNamedPipe(testIds: string[]): Promise { +// const pipeName: string = generateRandomPipeName('python-test-ids'); +// // uses callback so the on connect action occurs after the pipe is created +// await createNamedPipeServer(pipeName, ([_reader, writer]) => { +// traceVerbose('Test Ids named pipe connected'); +// // const num = await +// const msg = { +// jsonrpc: '2.0', +// params: testIds, +// } as Message; +// writer +// .write(msg) +// .then(() => { +// writer.end(); +// }) +// .catch((ex) => { +// traceError('Failed to write test ids to named pipe', ex); +// }); +// }); +// return pipeName; +// } interface ExecutionResultMessage extends Message { params: ExecutionTestPayload; @@ -231,44 +231,41 @@ export async function startRunResultNamedPipe( cancellationToken?: CancellationToken, ): Promise<{ name: string } & Disposable> { traceVerbose('Starting Test Result named pipe'); - const pipeName: string = generateRandomPipeName('python-test-results'); + const pipeName: string = '/Users/eleanorboyd/testingFiles/inc_dec_example/temp.txt'; // generateRandomPipeName('python-test-results'); + let disposeOfServer: () => void = () => { deferredTillServerClose.resolve(); /* noop */ }; - const server = await createNamedPipeServer(pipeName, ([reader, _writer]) => { - // this lambda function is: onConnectionCallback - // this is called once per client connecting to the server - traceVerbose(`Test Result named pipe ${pipeName} connected`); - let perConnectionDisposables: (Disposable | undefined)[] = [reader]; - - // create a function to dispose of the server - disposeOfServer = () => { - // dispose of all data listeners and cancelation listeners - perConnectionDisposables.forEach((d) => d?.dispose()); - perConnectionDisposables = []; - deferredTillServerClose.resolve(); - }; - perConnectionDisposables.push( - // per connection, add a listener for the cancellation token and the data - cancellationToken?.onCancellationRequested(() => { - console.log(`Test Result named pipe ${pipeName} cancelled`); - // if cancel is called on one connection, dispose of all connections - disposeOfServer(); - }), - reader.listen((data: Message) => { - traceVerbose(`Test Result named pipe ${pipeName} received data`); - dataReceivedCallback((data as ExecutionResultMessage).params as ExecutionTestPayload); - }), - ); - server.serverOnClosePromise().then(() 
=> { - // this is called once the server close, once per run instance - traceVerbose(`Test Result named pipe ${pipeName} closed. Disposing of listener/s.`); - // dispose of all data listeners and cancelation listeners + const reader = await createReaderPipe(pipeName, cancellationToken); + traceVerbose(`Test Discovery named pipe ${pipeName} connected`); + let perConnectionDisposables: (Disposable | undefined)[] = [reader]; + + // create a function to dispose of the server + disposeOfServer = () => { + // dispose of all data listeners and cancelation listeners + perConnectionDisposables.forEach((d) => d?.dispose()); + perConnectionDisposables = []; + deferredTillServerClose.resolve(); + }; + perConnectionDisposables.push( + cancellationToken?.onCancellationRequested(() => { + console.log(`Test Result named pipe ${pipeName} cancelled`); + // if cancel is called on one connection, dispose of all connections disposeOfServer(); - }); + }), + reader.listen((data: Message) => { + traceVerbose(`Test Result named pipe ${pipeName} received data`); + // if EOT, call decrement connection count (callback) + dataReceivedCallback((data as ExecutionResultMessage).params as ExecutionTestPayload | EOTTestPayload); + }), + ); + reader.onClose(() => { + // this is called once the server close, once per run instance + traceVerbose(`Test Result named pipe ${pipeName} closed. Disposing of listener/s.`); + // dispose of all data listeners and cancelation listeners + disposeOfServer(); }); - return { name: pipeName, dispose: disposeOfServer }; } @@ -281,33 +278,43 @@ export async function startDiscoveryNamedPipe( cancellationToken?: CancellationToken, ): Promise<{ name: string } & Disposable> { traceVerbose('Starting Test Discovery named pipe'); + // const pipeName: string = '/Users/eleanorboyd/testingFiles/inc_dec_example/temp33.txt'; const pipeName: string = generateRandomPipeName('python-test-discovery'); let dispose: () => void = () => { /* noop */ }; - await createNamedPipeServer(pipeName, ([reader, _writer]) => { - traceVerbose(`Test Discovery named pipe ${pipeName} connected`); - let disposables: (Disposable | undefined)[] = [reader]; - dispose = () => { - traceVerbose(`Test Discovery named pipe ${pipeName} disposed`); - disposables.forEach((d) => d?.dispose()); - disposables = []; - }; - disposables.push( - cancellationToken?.onCancellationRequested(() => { - traceVerbose(`Test Discovery named pipe ${pipeName} cancelled`); - dispose(); - }), - reader.listen((data: Message) => { - traceVerbose(`Test Discovery named pipe ${pipeName} received data`); - callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload); - }), - reader.onClose(() => { - traceVerbose(`Test Discovery named pipe ${pipeName} closed`); - dispose(); - }), - ); + const reader = await createReaderPipe(pipeName, cancellationToken); + + reader.listen((data: Message) => { + traceVerbose(`Test Discovery named pipe ${pipeName} received data`); + callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload | EOTTestPayload); }); + traceVerbose(`Test Discovery named pipe ${pipeName} connected`); + let disposables: (Disposable | undefined)[] = [reader]; + dispose = () => { + traceVerbose(`Test Discovery named pipe ${pipeName} disposed`); + disposables.forEach((d) => d?.dispose()); + disposables = []; + }; + disposables.push( + cancellationToken?.onCancellationRequested(() => { + traceVerbose(`Test Discovery named pipe ${pipeName} cancelled`); + dispose(); + }), + reader.listen((data: Message) => { + traceVerbose(`Test 
Discovery named pipe ${pipeName} received data`); + callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload | EOTTestPayload); + }), + reader.onClose(() => { + callback(createEOTPayload(false)); + traceVerbose(`Test Discovery named pipe ${pipeName} closed`); + dispose(); + }), + reader.onError((error) => { + traceError(`Test Discovery named pipe ${pipeName} error:`, error); + dispose(); + }), + ); return { name: pipeName, dispose }; } From 67367bb1bc2b64214ba8f9262243708959ea225f Mon Sep 17 00:00:00 2001 From: Karthik Nadig Date: Mon, 7 Oct 2024 19:37:49 -0700 Subject: [PATCH 02/18] Some tweaks --- python_files/vscode_pytest/__init__.py | 45 ++++++++------------------ src/client/common/pipes/namedPipes.ts | 2 +- 2 files changed, 15 insertions(+), 32 deletions(-) diff --git a/python_files/vscode_pytest/__init__.py b/python_files/vscode_pytest/__init__.py index 04eabbe8e7bb..739d9609b7db 100644 --- a/python_files/vscode_pytest/__init__.py +++ b/python_files/vscode_pytest/__init__.py @@ -20,17 +20,6 @@ import pytest - -# sys.path.append("/Users/eleanorboyd/vscode-python/.nox/install_python_libs/lib/python3.10") -# sys.path.append("/Users/eleanorboyd/vscode-python-debugger") -# sys.path.append("/Users/eleanorboyd/vscode-python-debugger/bundled") -# sys.path.append("/Users/eleanorboyd/vscode-python-debugger/bundled/libs") - -# import debugpy # noqa: E402 - -# debugpy.connect(5678) -# debugpy.breakpoint() # noqa: E702 - if TYPE_CHECKING: from pluggy import Result @@ -161,7 +150,7 @@ def pytest_exception_interact(node, call, report): collected_test = TestRunResultDict() collected_test[node_id] = item_result cwd = pathlib.Path.cwd() - execution_post( + send_execution_message( os.fsdecode(cwd), "success", collected_test if collected_test else None, @@ -285,7 +274,7 @@ def pytest_report_teststatus(report, config): # noqa: ARG001 ) collected_test = TestRunResultDict() collected_test[absolute_node_id] = item_result - execution_post( + send_execution_message( os.fsdecode(cwd), "success", collected_test if collected_test else None, @@ -319,7 +308,7 @@ def pytest_runtest_protocol(item, nextitem): # noqa: ARG001 ) collected_test = TestRunResultDict() collected_test[absolute_node_id] = item_result - execution_post( + send_execution_message( os.fsdecode(cwd), "success", collected_test if collected_test else None, @@ -395,7 +384,7 @@ def pytest_sessionfinish(session, exitstatus): "children": [], "id_": "", } - post_response(os.fsdecode(cwd), error_node) + send_discovery_message(os.fsdecode(cwd), error_node) try: session_node: TestNode | None = build_test_tree(session) if not session_node: @@ -403,7 +392,7 @@ def pytest_sessionfinish(session, exitstatus): "Something went wrong following pytest finish, \ no session node was created" ) - post_response(os.fsdecode(cwd), session_node) + send_discovery_message(os.fsdecode(cwd), session_node) except Exception as e: ERRORS.append( f"Error Occurred, traceback: {(traceback.format_exc() if e.__traceback__ else '')}" @@ -415,7 +404,7 @@ def pytest_sessionfinish(session, exitstatus): "children": [], "id_": "", } - post_response(os.fsdecode(cwd), error_node) + send_discovery_message(os.fsdecode(cwd), error_node) else: if exitstatus == 0 or exitstatus == 1: exitstatus_bool = "success" @@ -425,7 +414,7 @@ def pytest_sessionfinish(session, exitstatus): ) exitstatus_bool = "error" - execution_post( + send_execution_message( os.fsdecode(cwd), exitstatus_bool, None, @@ -823,8 +812,10 @@ def get_node_path(node: Any) -> pathlib.Path: atexit.register(lambda: 
__writer.close() if __writer else None) -def execution_post(cwd: str, status: Literal["success", "error"], tests: TestRunResultDict | None): - """Sends a POST request with execution payload details. +def send_execution_message( + cwd: str, status: Literal["success", "error"], tests: TestRunResultDict | None +): + """Sends message execution payload details. Args: cwd (str): Current working directory. @@ -836,10 +827,10 @@ def execution_post(cwd: str, status: Literal["success", "error"], tests: TestRun ) if ERRORS: payload["error"] = ERRORS - send_post_request(payload) + send_message(payload) -def post_response(cwd: str, session_node: TestNode) -> None: +def send_discovery_message(cwd: str, session_node: TestNode) -> None: """ Sends a POST request with test session details in payload. @@ -855,7 +846,7 @@ def post_response(cwd: str, session_node: TestNode) -> None: } if ERRORS is not None: payload["error"] = ERRORS - send_post_request(payload, cls_encoder=PathEncoder) + send_message(payload, cls_encoder=PathEncoder) class PathEncoder(json.JSONEncoder): @@ -878,7 +869,6 @@ def send_post_request( payload -- the payload data to be sent. cls_encoder -- a custom encoder if needed. """ - print("EJFB into send post request!") if not TEST_RUN_PIPE: error_msg = ( "PYTEST ERROR: TEST_RUN_PIPE is not set at the time of pytest starting. " @@ -893,10 +883,8 @@ def send_post_request( if __writer is None: try: - print("EJFB attemping writer open") __writer = open(TEST_RUN_PIPE, "w", encoding="utf-8", newline="\r\n") # noqa: SIM115, PTH123 except Exception as error: - print("EJFB error in writer open") error_msg = f"Error attempting to connect to extension named pipe {TEST_RUN_PIPE}[vscode-pytest]: {error}" print(error_msg, file=sys.stderr) print( @@ -913,16 +901,11 @@ def send_post_request( "params": payload, } data = json.dumps(rpc, cls=cls_encoder) - print(f"EJFB Plugin info[vscode-pytest]: sending data: \n{data}\n") try: if __writer: request = f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" __writer.write(request) __writer.flush() - print( - f"EJFB Plugin info[vscode-pytest]: data sent successfully[vscode-pytest]: \n{data}\n" - ) - # __writer.close() else: print( f"Plugin error connection error[vscode-pytest], writer is None \n[vscode-pytest] data: \n{data} \n", diff --git a/src/client/common/pipes/namedPipes.ts b/src/client/common/pipes/namedPipes.ts index 099d73ba9010..2cf67dd47923 100644 --- a/src/client/common/pipes/namedPipes.ts +++ b/src/client/common/pipes/namedPipes.ts @@ -166,7 +166,7 @@ export async function createReaderPipe(pipeName: string, token?: CancellationTok deferred.resolve(combined); return deferred.promise; } - // linux implementation of FIFO + // mac/linux implementation of FIFO await mkfifo(pipeName); const reader = fs.createReadStream(pipeName, { encoding: 'utf-8', From 5451fe6aa64fc3dad2a1f23491d2dcaac15f0d93 Mon Sep 17 00:00:00 2001 From: Karthik Nadig Date: Mon, 7 Oct 2024 23:00:15 -0700 Subject: [PATCH 03/18] Fixes duplicated messages with fifo and dispose --- src/client/common/pipes/namedPipes.ts | 67 +++++++------ .../testing/testController/common/utils.ts | 96 ++++++++++--------- .../pytest/pytestDiscoveryAdapter.ts | 4 +- .../pytest/pytestExecutionAdapter.ts | 17 +++- 4 files changed, 102 insertions(+), 82 deletions(-) diff --git a/src/client/common/pipes/namedPipes.ts b/src/client/common/pipes/namedPipes.ts index 2cf67dd47923..d796cbee8096 100644 --- a/src/client/common/pipes/namedPipes.ts +++ b/src/client/common/pipes/namedPipes.ts @@ -3,12 
+3,12 @@ import * as cp from 'child_process'; import * as crypto from 'crypto'; -import * as fs from 'fs'; +import * as fs from 'fs-extra'; import * as net from 'net'; import * as os from 'os'; import * as path from 'path'; import * as rpc from 'vscode-jsonrpc/node'; -import { CancellationError, CancellationToken } from 'vscode'; +import { CancellationError, CancellationToken, Disposable } from 'vscode'; import { traceVerbose } from '../../logging'; import { isWindows } from '../platform/platformService'; import { createDeferred } from '../utils/async'; @@ -73,6 +73,9 @@ export async function createWriterPipe(pipeName: string, token?: CancellationTok } // linux implementation of FIFO await mkfifo(pipeName); + try { + await fs.chmod(pipeName, 0o666); + } catch {} const writer = fs.createWriteStream(pipeName, { encoding: 'utf-8', }); @@ -86,14 +89,14 @@ class CombinedReader implements rpc.MessageReader { private _onPartialMessage = new rpc.Emitter(); - private _listeners = new rpc.Emitter(); - - private _readers: rpc.MessageReader[] = []; + private _callback: rpc.DataCallback = () => {}; private _disposables: rpc.Disposable[] = []; + private _readers: rpc.MessageReader[] = []; + constructor() { - this._disposables.push(this._onClose, this._onError, this._onPartialMessage, this._listeners); + this._disposables.push(this._onClose, this._onError, this._onPartialMessage); } onError: rpc.Event = this._onError.event; @@ -103,34 +106,41 @@ class CombinedReader implements rpc.MessageReader { onPartialMessage: rpc.Event = this._onPartialMessage.event; listen(callback: rpc.DataCallback): rpc.Disposable { - return this._listeners.event(callback); + this._callback = callback; + return new Disposable(() => (this._callback = () => {})); } add(reader: rpc.MessageReader): void { this._readers.push(reader); - this._disposables.push( - reader.onError((error) => this._onError.fire(error)), - reader.onClose(() => this.dispose()), - reader.onPartialMessage((info) => this._onPartialMessage.fire(info)), - reader.listen((msg) => { - this._listeners.fire(msg as rpc.NotificationMessage); - }), - ); + reader.listen((msg) => { + this._callback(msg as rpc.NotificationMessage); + }); + this._disposables.push(reader); + reader.onClose(() => { + this.remove(reader); + if (this._readers.length === 0) { + this._onClose.fire(); + } + }); + reader.onError((e) => { + this.remove(reader); + this._onError.fire(e); + }); } - error(error: Error): void { - this._onError.fire(error); + remove(reader: rpc.MessageReader): void { + const found = this._readers.find((r) => r === reader); + if (found) { + this._readers = this._readers.filter((r) => r !== reader); + reader.dispose(); + } } dispose(): void { - this._onClose.fire(); - this._disposables.forEach((disposable) => { - try { - disposable.dispose(); - } catch (e) { - /* noop */ - } - }); + this._readers.forEach((r) => r.dispose()); + this._readers = []; + this._disposables.forEach((disposable) => disposable.dispose()); + this._disposables = []; } } @@ -168,8 +178,9 @@ export async function createReaderPipe(pipeName: string, token?: CancellationTok } // mac/linux implementation of FIFO await mkfifo(pipeName); - const reader = fs.createReadStream(pipeName, { - encoding: 'utf-8', - }); + try { + await fs.chmod(pipeName, 0o666); + } catch {} + const reader = fs.createReadStream(pipeName, { encoding: 'utf-8' }); return new rpc.StreamMessageReader(reader, 'utf-8'); } diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index 
bf454ca22508..a34ac013ba52 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -229,44 +229,47 @@ export async function startRunResultNamedPipe( dataReceivedCallback: (payload: ExecutionTestPayload) => void, deferredTillServerClose: Deferred, cancellationToken?: CancellationToken, -): Promise<{ name: string } & Disposable> { +): Promise { traceVerbose('Starting Test Result named pipe'); - const pipeName: string = '/Users/eleanorboyd/testingFiles/inc_dec_example/temp.txt'; // generateRandomPipeName('python-test-results'); + const pipeName: string = generateRandomPipeName('python-test-results'); - let disposeOfServer: () => void = () => { - deferredTillServerClose.resolve(); - /* noop */ - }; const reader = await createReaderPipe(pipeName, cancellationToken); - traceVerbose(`Test Discovery named pipe ${pipeName} connected`); - let perConnectionDisposables: (Disposable | undefined)[] = [reader]; - - // create a function to dispose of the server - disposeOfServer = () => { - // dispose of all data listeners and cancelation listeners - perConnectionDisposables.forEach((d) => d?.dispose()); - perConnectionDisposables = []; + traceVerbose(`Test Results named pipe ${pipeName} connected`); + let disposables: Disposable[] = []; + const disposable = new Disposable(() => { + traceVerbose(`Test Results named pipe ${pipeName} disposed`); + disposables.forEach((d) => d.dispose()); + disposables = []; deferredTillServerClose.resolve(); - }; - perConnectionDisposables.push( - cancellationToken?.onCancellationRequested(() => { - console.log(`Test Result named pipe ${pipeName} cancelled`); - // if cancel is called on one connection, dispose of all connections - disposeOfServer(); - }), + }); + + if (cancellationToken) { + disposables.push( + cancellationToken?.onCancellationRequested(() => { + console.log(`Test Result named pipe ${pipeName} cancelled`); + disposable.dispose(); + }), + ); + } + disposables.push( + reader, reader.listen((data: Message) => { traceVerbose(`Test Result named pipe ${pipeName} received data`); // if EOT, call decrement connection count (callback) dataReceivedCallback((data as ExecutionResultMessage).params as ExecutionTestPayload | EOTTestPayload); }), + reader.onClose(() => { + // this is called once the server close, once per run instance + traceVerbose(`Test Result named pipe ${pipeName} closed. Disposing of listener/s.`); + // dispose of all data listeners and cancelation listeners + disposable.dispose(); + }), + reader.onError((error) => { + traceError(`Test Results named pipe ${pipeName} error:`, error); + }), ); - reader.onClose(() => { - // this is called once the server close, once per run instance - traceVerbose(`Test Result named pipe ${pipeName} closed. 
Disposing of listener/s.`); - // dispose of all data listeners and cancelation listeners - disposeOfServer(); - }); - return { name: pipeName, dispose: disposeOfServer }; + + return pipeName; } interface DiscoveryResultMessage extends Message { @@ -276,31 +279,31 @@ interface DiscoveryResultMessage extends Message { export async function startDiscoveryNamedPipe( callback: (payload: DiscoveredTestPayload) => void, cancellationToken?: CancellationToken, -): Promise<{ name: string } & Disposable> { +): Promise { traceVerbose('Starting Test Discovery named pipe'); // const pipeName: string = '/Users/eleanorboyd/testingFiles/inc_dec_example/temp33.txt'; const pipeName: string = generateRandomPipeName('python-test-discovery'); - let dispose: () => void = () => { - /* noop */ - }; const reader = await createReaderPipe(pipeName, cancellationToken); - reader.listen((data: Message) => { - traceVerbose(`Test Discovery named pipe ${pipeName} received data`); - callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload | EOTTestPayload); - }); traceVerbose(`Test Discovery named pipe ${pipeName} connected`); - let disposables: (Disposable | undefined)[] = [reader]; - dispose = () => { + let disposables: Disposable[] = []; + const disposable = new Disposable(() => { traceVerbose(`Test Discovery named pipe ${pipeName} disposed`); - disposables.forEach((d) => d?.dispose()); + disposables.forEach((d) => d.dispose()); disposables = []; - }; + }); + + if (cancellationToken) { + disposables.push( + cancellationToken.onCancellationRequested(() => { + traceVerbose(`Test Discovery named pipe ${pipeName} cancelled`); + disposable.dispose(); + }), + ); + } + disposables.push( - cancellationToken?.onCancellationRequested(() => { - traceVerbose(`Test Discovery named pipe ${pipeName} cancelled`); - dispose(); - }), + reader, reader.listen((data: Message) => { traceVerbose(`Test Discovery named pipe ${pipeName} received data`); callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload | EOTTestPayload); @@ -308,14 +311,13 @@ export async function startDiscoveryNamedPipe( reader.onClose(() => { callback(createEOTPayload(false)); traceVerbose(`Test Discovery named pipe ${pipeName} closed`); - dispose(); + disposable.dispose(); }), reader.onError((error) => { traceError(`Test Discovery named pipe ${pipeName} error:`, error); - dispose(); }), ); - return { name: pipeName, dispose }; + return pipeName; } export async function startTestIdServer(testIds: string[]): Promise { diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index e62bd02dd3de..3a562685af94 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -41,14 +41,14 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { executionFactory?: IPythonExecutionFactory, interpreter?: PythonEnvironment, ): Promise { - const { name, dispose } = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => { + const name = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => { this.resultResolver?.resolveDiscovery(data); }); try { await this.runPytestDiscovery(uri, name, executionFactory, interpreter); } finally { - dispose(); + traceVerbose('donee'); } // this is only a placeholder to handle function overloading until rewrite is finished const discoveryPayload: DiscoveredTestPayload = { cwd: uri.fsPath, status: 
'success' }; diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index fadec7f73488..9cf4dad8a0c2 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -48,10 +48,13 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { traceError(`No run instance found, cannot resolve execution, for workspace ${uri.fsPath}.`); } }; - const { name, dispose: serverDispose } = await utils.startRunResultNamedPipe( + const cSource = new CancellationTokenSource(); + runInstance?.token.onCancellationRequested(() => cSource.cancel()); + + const name = await utils.startRunResultNamedPipe( dataReceivedCallback, // callback to handle data received deferredTillServerClose, // deferred to resolve when server closes - runInstance?.token, // token to cancel + cSource.token, // token to cancel ); runInstance?.token.onCancellationRequested(() => { traceInfo(`Test run cancelled, resolving 'TillServerClose' deferred for ${uri.fsPath}.`); @@ -71,7 +74,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { uri, testIds, name, - serverDispose, + deferredTillEOT, + cSource, runInstance, profileKind, executionFactory, @@ -169,7 +173,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { }; traceInfo(`Running DEBUG pytest with arguments: ${testArgs} for workspace ${uri.fsPath} \r\n`); await debugLauncher!.launchDebugger(launchOptions, () => { - serverDispose(); // this will resolve deferredTillServerClose + serverCancel.cancel(); + deferredTillEOT?.resolve(); }); } else { // deferredTillExecClose is resolved when all stdout and stderr is read @@ -233,10 +238,12 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { } // this doesn't work, it instead directs us to the noop one which is defined first // potentially this is due to the server already being close, if this is the case? - serverDispose(); // this will resolve deferredTillServerClose } + + // deferredTillEOT is resolved when all data sent on stdout and stderr is received, close event is only called when this occurs // due to the sync reading of the output. 
deferredTillExecClose.resolve(); + serverCancel.cancel(); }); await deferredTillExecClose.promise; } From c6c47f7ebf0b9516204733dfc93cf2a0e3549d80 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 28 Oct 2024 11:26:38 -0700 Subject: [PATCH 04/18] minor updates to fix EOT removal and unittest working --- python_files/testing_tools/socket_manager.py | 8 +++----- python_files/unittestadapter/pvsc_utils.py | 11 ++++++----- python_files/vscode_pytest/__init__.py | 4 ++-- src/client/common/pipes/namedPipes.ts | 12 +++++++++--- .../testing/testController/common/utils.ts | 5 ++--- .../pytest/pytestExecutionAdapter.ts | 9 ++------- .../unittest/testDiscoveryAdapter.ts | 4 ++-- .../unittest/testExecutionAdapter.ts | 19 ++++++++++--------- 8 files changed, 36 insertions(+), 36 deletions(-) diff --git a/python_files/testing_tools/socket_manager.py b/python_files/testing_tools/socket_manager.py index b47e39a743fc..f143ac111cdb 100644 --- a/python_files/testing_tools/socket_manager.py +++ b/python_files/testing_tools/socket_manager.py @@ -26,15 +26,13 @@ def connect(self): def close(self): self._writer.close() - self._reader.close() + if hasattr(self, "_reader"): + self._reader.close() def write(self, data: str): - try: # for windows, is should only use \n\n - request = ( - f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" - ) + request = f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" self._writer.write(request) self._writer.flush() except Exception as e: diff --git a/python_files/unittestadapter/pvsc_utils.py b/python_files/unittestadapter/pvsc_utils.py index 09e61ff40518..16473f5691eb 100644 --- a/python_files/unittestadapter/pvsc_utils.py +++ b/python_files/unittestadapter/pvsc_utils.py @@ -307,7 +307,7 @@ def parse_unittest_args( def send_post_request( - payload: Union[ExecutionPayloadDict, DiscoveryPayloadDict, CoveragePayloadDict], + payload: ExecutionPayloadDict | DiscoveryPayloadDict | CoveragePayloadDict, test_run_pipe: Optional[str], ): """ @@ -331,10 +331,10 @@ def send_post_request( if __writer is None: try: - __writer = socket_manager.PipeManager(test_run_pipe) - __writer.connect() + __writer = open(test_run_pipe, "w", encoding="utf-8", newline="\r\n") # noqa: SIM115, PTH123 except Exception as error: error_msg = f"Error attempting to connect to extension named pipe {test_run_pipe}[vscode-unittest]: {error}" + print(error_msg, file=sys.stderr) __writer = None raise VSCodeUnittestError(error_msg) from error @@ -343,10 +343,11 @@ def send_post_request( "params": payload, } data = json.dumps(rpc) - try: if __writer: - __writer.write(data) + request = f"""content-length: {len(data)}\ncontent-type: application/json\n\n{data}""" + __writer.write(request) + __writer.flush() else: print( f"Connection error[vscode-unittest], writer is None \n[vscode-unittest] data: \n{data} \n", diff --git a/python_files/vscode_pytest/__init__.py b/python_files/vscode_pytest/__init__.py index 739d9609b7db..335be247263b 100644 --- a/python_files/vscode_pytest/__init__.py +++ b/python_files/vscode_pytest/__init__.py @@ -448,7 +448,7 @@ def pytest_sessionfinish(session, exitstatus): result=file_coverage_map, error=None, ) - send_post_request(payload) + send_message(payload) def build_test_tree(session: pytest.Session) -> TestNode: @@ -858,7 +858,7 @@ def default(self, o): return super().default(o) -def send_post_request( +def send_message( payload: ExecutionPayloadDict | DiscoveryPayloadDict | CoveragePayloadDict, cls_encoder=None, ): diff --git 
a/src/client/common/pipes/namedPipes.ts b/src/client/common/pipes/namedPipes.ts index d796cbee8096..9fd21fead2d5 100644 --- a/src/client/common/pipes/namedPipes.ts +++ b/src/client/common/pipes/namedPipes.ts @@ -75,7 +75,9 @@ export async function createWriterPipe(pipeName: string, token?: CancellationTok await mkfifo(pipeName); try { await fs.chmod(pipeName, 0o666); - } catch {} + } catch { + // Intentionally ignored + } const writer = fs.createWriteStream(pipeName, { encoding: 'utf-8', }); @@ -89,6 +91,7 @@ class CombinedReader implements rpc.MessageReader { private _onPartialMessage = new rpc.Emitter(); + // eslint-disable-next-line @typescript-eslint/no-empty-function private _callback: rpc.DataCallback = () => {}; private _disposables: rpc.Disposable[] = []; @@ -107,6 +110,7 @@ class CombinedReader implements rpc.MessageReader { listen(callback: rpc.DataCallback): rpc.Disposable { this._callback = callback; + // eslint-disable-next-line no-return-assign, @typescript-eslint/no-empty-function return new Disposable(() => (this._callback = () => {})); } @@ -167,7 +171,7 @@ export async function createReaderPipe(pipeName: string, token?: CancellationTok server.listen(pipeName); if (token) { token.onCancellationRequested(() => { - if (server.listening) { + if (server.listening) { server.close(); } deferred.reject(new CancellationError()); @@ -180,7 +184,9 @@ export async function createReaderPipe(pipeName: string, token?: CancellationTok await mkfifo(pipeName); try { await fs.chmod(pipeName, 0o666); - } catch {} + } catch { + // Intentionally ignored + } const reader = fs.createReadStream(pipeName, { encoding: 'utf-8' }); return new rpc.StreamMessageReader(reader, 'utf-8'); } diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index a34ac013ba52..8ca5bf469c12 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -256,7 +256,7 @@ export async function startRunResultNamedPipe( reader.listen((data: Message) => { traceVerbose(`Test Result named pipe ${pipeName} received data`); // if EOT, call decrement connection count (callback) - dataReceivedCallback((data as ExecutionResultMessage).params as ExecutionTestPayload | EOTTestPayload); + dataReceivedCallback((data as ExecutionResultMessage).params as ExecutionTestPayload); }), reader.onClose(() => { // this is called once the server close, once per run instance @@ -306,10 +306,9 @@ export async function startDiscoveryNamedPipe( reader, reader.listen((data: Message) => { traceVerbose(`Test Discovery named pipe ${pipeName} received data`); - callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload | EOTTestPayload); + callback((data as DiscoveryResultMessage).params as DiscoveredTestPayload); }), reader.onClose(() => { - callback(createEOTPayload(false)); traceVerbose(`Test Discovery named pipe ${pipeName} closed`); disposable.dispose(); }), diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 9cf4dad8a0c2..a7578045b84a 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
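// [editor's note] These commits replace the threaded-through serverDispose callback
// with a CancellationTokenSource that mirrors the run token (this hunk adds its
// import), so cancelling a run also tears down the reader pipe. A minimal sketch of
// the relay, assuming only the vscode API (relayCancellation is an illustrative name):
import { CancellationToken, CancellationTokenSource } from 'vscode';

function relayCancellation(outer?: CancellationToken): CancellationTokenSource {
    const source = new CancellationTokenSource();
    // Cancelling the outer token (e.g. the TestRun token) cancels the inner source,
    // which the reader pipe watches in order to dispose itself.
    outer?.onCancellationRequested(() => source.cancel());
    return source;
}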
-import { TestRun, TestRunProfileKind, Uri } from 'vscode'; +import { CancellationTokenSource, TestRun, TestRunProfileKind, Uri } from 'vscode'; import * as path from 'path'; import { ChildProcess } from 'child_process'; import { IConfigurationService, ITestOutputChannel } from '../../../common/types'; @@ -58,9 +58,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { ); runInstance?.token.onCancellationRequested(() => { traceInfo(`Test run cancelled, resolving 'TillServerClose' deferred for ${uri.fsPath}.`); - // if canceled, stop listening for results - serverDispose(); // this will resolve deferredTillServerClose - const executionPayload: ExecutionTestPayload = { cwd: uri.fsPath, status: 'success', @@ -74,7 +71,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { uri, testIds, name, - deferredTillEOT, cSource, runInstance, profileKind, @@ -100,7 +96,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { uri: Uri, testIds: string[], resultNamedPipeName: string, - serverDispose: () => void, + serverCancel: CancellationTokenSource, runInstance?: TestRun, profileKind?: TestRunProfileKind, executionFactory?: IPythonExecutionFactory, @@ -174,7 +170,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { traceInfo(`Running DEBUG pytest with arguments: ${testArgs} for workspace ${uri.fsPath} \r\n`); await debugLauncher!.launchDebugger(launchOptions, () => { serverCancel.cancel(); - deferredTillEOT?.resolve(); }); } else { // deferredTillExecClose is resolved when all stdout and stderr is read diff --git a/src/client/testing/testController/unittest/testDiscoveryAdapter.ts b/src/client/testing/testController/unittest/testDiscoveryAdapter.ts index b2047f96a01f..ba52d1ffd57b 100644 --- a/src/client/testing/testController/unittest/testDiscoveryAdapter.ts +++ b/src/client/testing/testController/unittest/testDiscoveryAdapter.ts @@ -44,7 +44,7 @@ export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter { const { unittestArgs } = settings.testing; const cwd = settings.testing.cwd && settings.testing.cwd.length > 0 ? settings.testing.cwd : uri.fsPath; - const { name, dispose } = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => { + const name = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => { this.resultResolver?.resolveDiscovery(data); }); @@ -66,7 +66,7 @@ export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter { try { await this.runDiscovery(uri, options, name, cwd, executionFactory); } finally { - dispose(); + // none } // placeholder until after the rewrite is adopted // TODO: remove after adoption. diff --git a/src/client/testing/testController/unittest/testExecutionAdapter.ts b/src/client/testing/testController/unittest/testExecutionAdapter.ts index 285f045f3e33..e72d877e7617 100644 --- a/src/client/testing/testController/unittest/testExecutionAdapter.ts +++ b/src/client/testing/testController/unittest/testExecutionAdapter.ts @@ -2,7 +2,7 @@ // Licensed under the MIT License. 
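// [editor's note] The CombinedReader reworked earlier in this series stores a single
// callback instead of fanning out through an Emitter, which the subject of commit 3
// ("Fixes duplicated messages with fifo and dispose") suggests was the source of the
// duplicated messages. A condensed sketch of that listen() shape, assuming only the
// vscode Disposable API (SingleCallbackSource is an illustrative name):
import { Disposable } from 'vscode';

class SingleCallbackSource<T> {
    private callback: (msg: T) => void = () => {};

    listen(callback: (msg: T) => void): Disposable {
        this.callback = callback;
        // Disposing resets the callback to a no-op instead of tracking listener lists.
        return new Disposable(() => {
            this.callback = () => {};
        });
    }

    fire(msg: T): void {
        this.callback(msg);
    }
}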
import * as path from 'path'; -import { TestRun, TestRunProfileKind, Uri } from 'vscode'; +import { CancellationTokenSource, TestRun, TestRunProfileKind, Uri } from 'vscode'; import { ChildProcess } from 'child_process'; import { IConfigurationService, ITestOutputChannel } from '../../../common/types'; import { Deferred, createDeferred } from '../../../common/utils/async'; @@ -58,23 +58,24 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter { traceError(`No run instance found, cannot resolve execution, for workspace ${uri.fsPath}.`); } }; - const { name: resultNamedPipeName, dispose: serverDispose } = await utils.startRunResultNamedPipe( + const cSource = new CancellationTokenSource(); + runInstance?.token.onCancellationRequested(() => cSource.cancel()); + const name = await utils.startRunResultNamedPipe( dataReceivedCallback, // callback to handle data received deferredTillServerClose, // deferred to resolve when server closes - runInstance?.token, // token to cancel + cSource.token, // token to cancel ); runInstance?.token.onCancellationRequested(() => { console.log(`Test run cancelled, resolving 'till TillAllServerClose' deferred for ${uri.fsPath}.`); // if canceled, stop listening for results deferredTillServerClose.resolve(); - serverDispose(); }); try { await this.runTestsNew( uri, testIds, - resultNamedPipeName, - serverDispose, + name, + cSource, runInstance, profileKind, executionFactory, @@ -97,7 +98,7 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter { uri: Uri, testIds: string[], resultNamedPipeName: string, - serverDispose: () => void, + serverCancel: CancellationTokenSource, runInstance?: TestRun, profileKind?: TestRunProfileKind, executionFactory?: IPythonExecutionFactory, @@ -172,7 +173,7 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter { throw new Error('Debug launcher is not defined'); } await debugLauncher.launchDebugger(launchOptions, () => { - serverDispose(); // this will resolve the deferredTillAllServerClose + serverCancel.cancel(); }); } else { // This means it is running the test @@ -225,9 +226,9 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter { runInstance, ); } - serverDispose(); } deferredTillExecClose.resolve(); + serverCancel.cancel(); }); await deferredTillExecClose.promise; } From 8806d2789649a69ec9f756b1c80f6e70773fb82e Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 28 Oct 2024 12:51:37 -0700 Subject: [PATCH 05/18] formatting & linting --- python_files/tests/pytestadapter/helpers.py | 6 ------ python_files/unittestadapter/pvsc_utils.py | 2 +- src/client/common/pipes/namedPipes.ts | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/python_files/tests/pytestadapter/helpers.py b/python_files/tests/pytestadapter/helpers.py index 28e17764d986..f2e392b49e6a 100644 --- a/python_files/tests/pytestadapter/helpers.py +++ b/python_files/tests/pytestadapter/helpers.py @@ -394,12 +394,6 @@ def generate_random_pipe_name(prefix=""): return os.path.join(tempfile.gettempdir(), f"{prefix}-{random_suffix}") # noqa: PTH118 -async def create_fifo(pipe_name: str) -> None: - # Create the FIFO (named pipe) if it doesn't exist - if not pathlib.Path.exists(pipe_name): - os.mkfifo(pipe_name) - - class UnixPipeServer: def __init__(self, name): self.name = name diff --git a/python_files/unittestadapter/pvsc_utils.py b/python_files/unittestadapter/pvsc_utils.py index 16473f5691eb..da3e21a74954 100644 --- a/python_files/unittestadapter/pvsc_utils.py 
+++ b/python_files/unittestadapter/pvsc_utils.py @@ -307,7 +307,7 @@ def parse_unittest_args( def send_post_request( - payload: ExecutionPayloadDict | DiscoveryPayloadDict | CoveragePayloadDict, + payload: Union[ExecutionPayloadDict, DiscoveryPayloadDict, CoveragePayloadDict], test_run_pipe: Optional[str], ): """ diff --git a/src/client/common/pipes/namedPipes.ts b/src/client/common/pipes/namedPipes.ts index 9fd21fead2d5..81a2444f9bf0 100644 --- a/src/client/common/pipes/namedPipes.ts +++ b/src/client/common/pipes/namedPipes.ts @@ -171,7 +171,7 @@ export async function createReaderPipe(pipeName: string, token?: CancellationTok server.listen(pipeName); if (token) { token.onCancellationRequested(() => { - if (server.listening) { + if (server.listening) { server.close(); } deferred.reject(new CancellationError()); From 1271943d03f85146eb304e8447568d66f89d6e23 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 28 Oct 2024 13:37:03 -0700 Subject: [PATCH 06/18] fixing mocked named pipes --- python_files/unittestadapter/pvsc_utils.py | 2 -- .../pytest/pytestDiscoveryAdapter.unit.test.ts | 9 +-------- .../pytest/pytestExecutionAdapter.unit.test.ts | 9 +-------- .../unittest/testExecutionAdapter.unit.test.ts | 9 +-------- 4 files changed, 3 insertions(+), 26 deletions(-) diff --git a/python_files/unittestadapter/pvsc_utils.py b/python_files/unittestadapter/pvsc_utils.py index da3e21a74954..cba3a2d1f59d 100644 --- a/python_files/unittestadapter/pvsc_utils.py +++ b/python_files/unittestadapter/pvsc_utils.py @@ -18,8 +18,6 @@ from typing_extensions import NotRequired # noqa: E402 -from testing_tools import socket_manager # noqa: E402 - # Types diff --git a/src/test/testing/testController/pytest/pytestDiscoveryAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestDiscoveryAdapter.unit.test.ts index 87b91f6ae2da..e37cc4e622a5 100644 --- a/src/test/testing/testController/pytest/pytestDiscoveryAdapter.unit.test.ts +++ b/src/test/testing/testController/pytest/pytestDiscoveryAdapter.unit.test.ts @@ -40,14 +40,7 @@ suite('pytest test discovery adapter', () => { mockExtensionRootDir.setup((m) => m.toString()).returns(() => '/mocked/extension/root/dir'); utilsStartDiscoveryNamedPipeStub = sinon.stub(util, 'startDiscoveryNamedPipe'); - utilsStartDiscoveryNamedPipeStub.callsFake(() => - Promise.resolve({ - name: 'discoveryResultPipe-mockName', - dispose: () => { - /* no-op */ - }, - }), - ); + utilsStartDiscoveryNamedPipeStub.callsFake(() => Promise.resolve('discoveryResultPipe-mockName')); // constants expectedPath = path.join('/', 'my', 'test', 'path'); diff --git a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts index 9e0f0d3d6302..9e9b39e91ce8 100644 --- a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts +++ b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts @@ -83,14 +83,7 @@ suite('pytest test execution adapter', () => { myTestPath = path.join('/', 'my', 'test', 'path', '/'); utilsStartRunResultNamedPipeStub = sinon.stub(util, 'startRunResultNamedPipe'); - utilsStartRunResultNamedPipeStub.callsFake(() => - Promise.resolve({ - name: 'runResultPipe-mockName', - dispose: () => { - /* no-op */ - }, - }), - ); + utilsStartRunResultNamedPipeStub.callsFake(() => Promise.resolve('runResultPipe-mockName')); }); teardown(() => { sinon.restore(); diff --git a/src/test/testing/testController/unittest/testExecutionAdapter.unit.test.ts 
b/src/test/testing/testController/unittest/testExecutionAdapter.unit.test.ts index d763cbcdff92..9521c4ab9b79 100644 --- a/src/test/testing/testController/unittest/testExecutionAdapter.unit.test.ts +++ b/src/test/testing/testController/unittest/testExecutionAdapter.unit.test.ts @@ -83,14 +83,7 @@ suite('Unittest test execution adapter', () => { myTestPath = path.join('/', 'my', 'test', 'path', '/'); utilsStartRunResultNamedPipeStub = sinon.stub(util, 'startRunResultNamedPipe'); - utilsStartRunResultNamedPipeStub.callsFake(() => - Promise.resolve({ - name: 'runResultPipe-mockName', - dispose: () => { - /* no-op */ - }, - }), - ); + utilsStartRunResultNamedPipeStub.callsFake(() => Promise.resolve('runResultPipe-mockName')); }); teardown(() => { sinon.restore(); From 17e23acce23990630ae8196f99b8e79d29b3c657 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 28 Oct 2024 14:38:50 -0700 Subject: [PATCH 07/18] fix server cancelation in tests --- .../testCancellationRunAdapters.unit.test.ts | 27 ++++++++----------- .../testDiscoveryAdapter.unit.test.ts | 9 +------ 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts b/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts index 96f15f0b91f7..0c8d09021cc5 100644 --- a/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts +++ b/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts @@ -66,11 +66,15 @@ suite('Execution Flow Run Adapters', () => { const { token } = cancellationToken; testRunMock.setup((t) => t.token).returns(() => token); + // run result pipe mocking and the related server close dispose + let deferredTillServerCloseTester: Deferred | undefined; + // // mock exec service and exec factory execServiceStub .setup((x) => x.execObservable(typeMoq.It.isAny(), typeMoq.It.isAny())) .returns(() => { cancellationToken.cancel(); + deferredTillServerCloseTester?.resolve(); return { proc: mockProc as any, out: typeMoq.Mock.ofType>>().object, @@ -92,11 +96,9 @@ suite('Execution Flow Run Adapters', () => { return Promise.resolve('named-pipe'); }); - // run result pipe mocking and the related server close dispose - let deferredTillServerCloseTester: Deferred | undefined; utilsStartRunResultNamedPipe.callsFake((_callback, deferredTillServerClose, _token) => { deferredTillServerCloseTester = deferredTillServerClose; - return Promise.resolve({ name: 'named-pipes-socket-name', dispose: serverDisposeStub }); + return Promise.resolve('named-pipes-socket-name'); }); serverDisposeStub.callsFake(() => { console.log('server disposed'); @@ -122,9 +124,6 @@ suite('Execution Flow Run Adapters', () => { ); // wait for server to start to keep test from failing await deferredStartTestIdsNamedPipe.promise; - - // assert the server dispose function was called correctly - sinon.assert.calledOnce(serverDisposeStub); }); test(`Adapter ${adapter}: token called mid-debug resolves correctly`, async () => { // mock test run and cancelation token @@ -133,11 +132,15 @@ suite('Execution Flow Run Adapters', () => { const { token } = cancellationToken; testRunMock.setup((t) => t.token).returns(() => token); + // run result pipe mocking and the related server close dispose + let deferredTillServerCloseTester: Deferred | undefined; + // // mock exec service and exec factory execServiceStub .setup((x) => x.execObservable(typeMoq.It.isAny(), typeMoq.It.isAny())) .returns(() => { cancellationToken.cancel(); + 
deferredTillServerCloseTester?.resolve(); return { proc: mockProc as any, out: typeMoq.Mock.ofType>>().object, @@ -159,14 +162,9 @@ suite('Execution Flow Run Adapters', () => { return Promise.resolve('named-pipe'); }); - // run result pipe mocking and the related server close dispose - let deferredTillServerCloseTester: Deferred | undefined; utilsStartRunResultNamedPipe.callsFake((_callback, deferredTillServerClose, _token) => { deferredTillServerCloseTester = deferredTillServerClose; - return Promise.resolve({ - name: 'named-pipes-socket-name', - dispose: serverDisposeStub, - }); + return Promise.resolve('named-pipes-socket-name'); }); serverDisposeStub.callsFake(() => { console.log('server disposed'); @@ -190,6 +188,7 @@ suite('Execution Flow Run Adapters', () => { }) .returns(async () => { cancellationToken.cancel(); + deferredTillServerCloseTester?.resolve(); return Promise.resolve(); }); @@ -205,10 +204,6 @@ suite('Execution Flow Run Adapters', () => { ); // wait for server to start to keep test from failing await deferredStartTestIdsNamedPipe.promise; - - // TODO: fix the server disposal so it is called once not twice, - // currently not a problem but would be useful to improve clarity - sinon.assert.called(serverDisposeStub); }); }); }); diff --git a/src/test/testing/testController/unittest/testDiscoveryAdapter.unit.test.ts b/src/test/testing/testController/unittest/testDiscoveryAdapter.unit.test.ts index e0442197467f..e6d1cbc29293 100644 --- a/src/test/testing/testController/unittest/testDiscoveryAdapter.unit.test.ts +++ b/src/test/testing/testController/unittest/testDiscoveryAdapter.unit.test.ts @@ -77,14 +77,7 @@ suite('Unittest test discovery adapter', () => { }; utilsStartDiscoveryNamedPipeStub = sinon.stub(util, 'startDiscoveryNamedPipe'); - utilsStartDiscoveryNamedPipeStub.callsFake(() => - Promise.resolve({ - name: 'discoveryResultPipe-mockName', - dispose: () => { - /* no-op */ - }, - }), - ); + utilsStartDiscoveryNamedPipeStub.callsFake(() => Promise.resolve('discoveryResultPipe-mockName')); }); teardown(() => { sinon.restore(); From f69406160f534b972f7c10c053e916fcd80a133d Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Tue, 29 Oct 2024 14:02:35 -0700 Subject: [PATCH 08/18] fix cancelation tests --- python_files/tests/pytestadapter/helpers.py | 3 ++- .../testController/pytest/pytestExecutionAdapter.ts | 1 + .../testController/unittest/testExecutionAdapter.ts | 1 + .../testCancellationRunAdapters.unit.test.ts | 12 ++++++++---- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/python_files/tests/pytestadapter/helpers.py b/python_files/tests/pytestadapter/helpers.py index f2e392b49e6a..7a75e6248844 100644 --- a/python_files/tests/pytestadapter/helpers.py +++ b/python_files/tests/pytestadapter/helpers.py @@ -130,7 +130,8 @@ def parse_rpc_message(data: str) -> Tuple[Dict[str, str], str]: def _listen_on_fifo(pipe_name: str, result: List[str], completed: threading.Event): # Open the FIFO for reading - with open(pipe_name) as fifo: + fifo_path = pathlib.Path(pipe_name) + with fifo_path.open() as fifo: print("Waiting for data...") while True: if completed.is_set(): diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index a7578045b84a..716999770b0d 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -188,6 +188,7 @@ export class PytestTestExecutionAdapter implements 
ITestExecutionAdapter { resultProc?.kill(); } else { deferredTillExecClose.resolve(); + serverCancel.cancel(); } }); diff --git a/src/client/testing/testController/unittest/testExecutionAdapter.ts b/src/client/testing/testController/unittest/testExecutionAdapter.ts index e72d877e7617..f69ec4379908 100644 --- a/src/client/testing/testController/unittest/testExecutionAdapter.ts +++ b/src/client/testing/testController/unittest/testExecutionAdapter.ts @@ -190,6 +190,7 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter { resultProc?.kill(); } else { deferredTillExecClose?.resolve(); + serverCancel.cancel(); } }); diff --git a/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts b/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts index 0c8d09021cc5..1b90244fb41d 100644 --- a/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts +++ b/src/test/testing/testController/testCancellationRunAdapters.unit.test.ts @@ -74,7 +74,6 @@ suite('Execution Flow Run Adapters', () => { .setup((x) => x.execObservable(typeMoq.It.isAny(), typeMoq.It.isAny())) .returns(() => { cancellationToken.cancel(); - deferredTillServerCloseTester?.resolve(); return { proc: mockProc as any, out: typeMoq.Mock.ofType>>().object, @@ -96,8 +95,12 @@ suite('Execution Flow Run Adapters', () => { return Promise.resolve('named-pipe'); }); - utilsStartRunResultNamedPipe.callsFake((_callback, deferredTillServerClose, _token) => { + utilsStartRunResultNamedPipe.callsFake((_callback, deferredTillServerClose, token) => { deferredTillServerCloseTester = deferredTillServerClose; + token?.onCancellationRequested(() => { + deferredTillServerCloseTester?.resolve(); + }); + return Promise.resolve('named-pipes-socket-name'); }); serverDisposeStub.callsFake(() => { @@ -140,7 +143,6 @@ suite('Execution Flow Run Adapters', () => { .setup((x) => x.execObservable(typeMoq.It.isAny(), typeMoq.It.isAny())) .returns(() => { cancellationToken.cancel(); - deferredTillServerCloseTester?.resolve(); return { proc: mockProc as any, out: typeMoq.Mock.ofType>>().object, @@ -164,6 +166,9 @@ suite('Execution Flow Run Adapters', () => { utilsStartRunResultNamedPipe.callsFake((_callback, deferredTillServerClose, _token) => { deferredTillServerCloseTester = deferredTillServerClose; + token?.onCancellationRequested(() => { + deferredTillServerCloseTester?.resolve(); + }); return Promise.resolve('named-pipes-socket-name'); }); serverDisposeStub.callsFake(() => { @@ -188,7 +193,6 @@ suite('Execution Flow Run Adapters', () => { }) .returns(async () => { cancellationToken.cancel(); - deferredTillServerCloseTester?.resolve(); return Promise.resolve(); }); From 140fa1ee8cf1b7dfbb857b44948935a1ce3914f3 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 30 Oct 2024 11:12:53 -0700 Subject: [PATCH 09/18] add logging for linux testing --- .../pytest/pytestExecutionAdapter.ts | 18 ++++++++++++++++++ src/test/testing/common/testingAdapter.test.ts | 6 +++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 716999770b0d..0f1b347ab531 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -38,10 +38,13 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { debugLauncher?: ITestDebugLauncher, interpreter?: 
PythonEnvironment, ): Promise { + console.log('EJFB running tests'); const deferredTillServerClose: Deferred = utils.createTestingDeferred(); // create callback to handle data received on the named pipe const dataReceivedCallback = (data: ExecutionTestPayload) => { + console.log('EJFB data received callback'); + if (runInstance && !runInstance.token.isCancellationRequested) { this.resultResolver?.resolveExecution(data, runInstance); } else { @@ -57,6 +60,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { cSource.token, // token to cancel ); runInstance?.token.onCancellationRequested(() => { + console.log('EJFB cancelation token hit!'); + traceInfo(`Test run cancelled, resolving 'TillServerClose' deferred for ${uri.fsPath}.`); const executionPayload: ExecutionTestPayload = { cwd: uri.fsPath, @@ -79,6 +84,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { interpreter, ); } finally { + console.log('EJFB await finally'); + await deferredTillServerClose.promise; } @@ -103,6 +110,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { debugLauncher?: ITestDebugLauncher, interpreter?: PythonEnvironment, ): Promise { + console.log('EJFB running tests 2222'); + const relativePathToPytest = 'python_files'; const fullPluginPath = path.join(EXTENSION_ROOT_DIR, relativePathToPytest); const settings = this.configSettings.getSettings(uri); @@ -182,6 +191,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { let resultProc: ChildProcess | undefined; runInstance?.token.onCancellationRequested(() => { + console.log('EJFB run instance canceled'); + traceInfo(`Test run cancelled, killing pytest subprocess for workspace ${uri.fsPath}`); // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here. if (resultProc) { @@ -209,6 +220,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { this.outputChannel?.append(out); }); result?.proc?.on('exit', (code, signal) => { + console.log('EJFB on exit'); + this.outputChannel?.append(utils.MESSAGE_ON_TESTING_OUTPUT_MOVE); if (code !== 0 && testIds) { traceError( @@ -218,6 +231,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { }); result?.proc?.on('close', (code, signal) => { + console.log('EJFB on close'); + traceVerbose('Test run finished, subprocess closed.'); // if the child has testIds then this is a run request // if the child process exited with a non-zero exit code, then we need to send the error payload. @@ -234,6 +249,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { } // this doesn't work, it instead directs us to the noop one which is defined first // potentially this is due to the server already being close, if this is the case? 
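                    // [editorial note, not part of the patch] The stale dispose callback described
                    // above is what this series replaces: rather than threading a serverDispose()
                    // function through the adapter, the adapters now create a CancellationTokenSource,
                    // hand its token to startRunResultNamedPipe(), and cancel it once the run closes.
                    // A minimal sketch of that wiring, using names taken from this diff (an
                    // illustration under those assumptions, not the literal extension code):
                    //
                    //   const serverCancel = new CancellationTokenSource();
                    //   // mirror cancellation of the test run onto the pipe server's token
                    //   runInstance?.token.onCancellationRequested(() => serverCancel.cancel());
                    //   const pipeName = await utils.startRunResultNamedPipe(
                    //       onDataReceived,          // callback invoked per result payload
                    //       deferredTillServerClose, // resolved when the pipe server shuts down
                    //       serverCancel.token,      // cancelling this token closes the server
                    //   );
                    //   // ... spawn the test subprocess, await its 'close' event ...
                    //   deferredTillExecClose.resolve();
                    //   serverCancel.cancel(); // tears down the named-pipe server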
+ console.log('right before serverDispose'); } // deferredTillEOT is resolved when all data sent on stdout and stderr is received, close event is only called when this occurs @@ -241,6 +257,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { deferredTillExecClose.resolve(); serverCancel.cancel(); }); + console.log('EJFB awaiting deferredTillExecClose'); + await deferredTillExecClose.promise; } } catch (ex) { diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 8a1891962429..6b6c65ade613 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -1144,6 +1144,7 @@ suite('End to End Tests: test adapters', () => { let callCount = 0; let failureOccurred = false; let failureMsg = ''; + console.log('EFB: beginning function'); resultResolver._resolveExecution = async (data, _token?) => { // do the following asserts for each time resolveExecution is called, should be called once per test. console.log(`pytest execution adapter seg fault error handling \n ${JSON.stringify(data)}`); @@ -1169,7 +1170,8 @@ suite('End to End Tests: test adapters', () => { failureMsg = err ? (err as Error).toString() : ''; failureOccurred = true; } - // return Promise.resolve(); + console.log('EJFB returning promise.resolve'); + return Promise.resolve(); }; const testId = `${rootPathErrorWorkspace}/test_seg_fault.py::TestSegmentationFault::test_segfault`; @@ -1195,9 +1197,11 @@ suite('End to End Tests: test adapters', () => { onCancellationRequested: () => undefined, } as any), ); + console.log('EJFB, right before run tests'); await executionAdapter .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory) .finally(() => { + console.log('EJFB executing assertions'); assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); assert.strictEqual(failureOccurred, false, failureMsg); }); From bdabd104987ab1e9d332ec33202fdfaf87b31781 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 30 Oct 2024 11:32:18 -0700 Subject: [PATCH 10/18] add logging --- .../testing/testController/pytest/pytestExecutionAdapter.ts | 4 ++++ src/testTestingRootWkspc/errorWorkspace/test_seg_fault.py | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 0f1b347ab531..7020c76c9b87 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -72,6 +72,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { }); try { + console.log('EJFB run tests new'); await this.runTestsNew( uri, testIds, @@ -203,6 +204,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { } }); + console.log('EJFB before execObservable'); const result = execService?.execObservable(runArgs, spawnOptions); resultProc = result?.proc; @@ -213,11 +215,13 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const out = utils.fixLogLinesNoTrailing(data.toString()); runInstance?.appendOutput(out); this.outputChannel?.append(out); + console.log('EJFB stdout', out); }); result?.proc?.stderr?.on('data', (data) => { const out = utils.fixLogLinesNoTrailing(data.toString()); runInstance?.appendOutput(out); this.outputChannel?.append(out); + console.log('EJFB stderr', out); }); 
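            // [editorial note, not part of the patch] The handlers above and below rely on
            // Node's ChildProcess event ordering: 'exit' fires when the process terminates,
            // possibly before buffered stdout/stderr has been delivered, while 'close' fires
            // only after every stdio stream has ended. That is why deferredTillExecClose is
            // resolved in the 'close' handler rather than in 'exit'. A small standalone
            // illustration (hypothetical script, not part of this patch):
            //
            //   import { spawn } from 'child_process';
            //   const proc = spawn(process.execPath, ['-e', 'console.log("x".repeat(1e7))']);
            //   proc.on('exit', (code) => console.log('exit fired, code:', code));   // may fire first
            //   proc.on('close', (code) => console.log('close fired, code:', code)); // after stdio drains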
result?.proc?.on('exit', (code, signal) => { console.log('EJFB on exit'); diff --git a/src/testTestingRootWkspc/errorWorkspace/test_seg_fault.py b/src/testTestingRootWkspc/errorWorkspace/test_seg_fault.py index bad7ff8fcbbd..80be80f023c2 100644 --- a/src/testTestingRootWkspc/errorWorkspace/test_seg_fault.py +++ b/src/testTestingRootWkspc/errorWorkspace/test_seg_fault.py @@ -7,11 +7,12 @@ class TestSegmentationFault(unittest.TestCase): def cause_segfault(self): + print("Causing a segmentation fault") ctypes.string_at(0) # Dereference a NULL pointer def test_segfault(self): - assert True self.cause_segfault() + assert True if __name__ == "__main__": From dbdd894b93f44c7ef2726b59cee6f64706b44a0e Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 30 Oct 2024 11:48:55 -0700 Subject: [PATCH 11/18] p3 --- .../testing/testController/pytest/pytestExecutionAdapter.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 7020c76c9b87..49745b06c72f 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -122,6 +122,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const mutableEnv = { ...(await this.envVarsService?.getEnvironmentVariables(uri)), }; + console.log('EJFB after env vars service'); // get python path from mutable env, it contains process.env as well const pythonPathParts: string[] = mutableEnv.PYTHONPATH?.split(path.delimiter) ?? []; const pythonPathCommand = [fullPluginPath, ...pythonPathParts].join(path.delimiter); @@ -140,6 +141,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { }; // need to check what will happen in the exec service is NOT defined and is null const execService = await executionFactory?.createActivatedEnvironment(creationOptions); + console.log('EJFB after exec service'); try { // Remove positional test folders and files, we will add as needed per node let testArgs = removePositionalFoldersAndFiles(pytestArgs); @@ -156,6 +158,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // create a file with the test ids and set the environment variable to the file name const testIdsFileName = await utils.writeTestIdsFile(testIds); + console.log('EJFB after write test ids file'); mutableEnv.RUN_TEST_IDS_PIPE = testIdsFileName; traceInfo(`All environment variables set for pytest execution: ${JSON.stringify(mutableEnv)}`); @@ -182,6 +185,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { serverCancel.cancel(); }); } else { + console.log('EJFB before execObservable'); // deferredTillExecClose is resolved when all stdout and stderr is read const deferredTillExecClose: Deferred = utils.createTestingDeferred(); // combine path to run script with run args From 8c3a4088236e9f2ac1317b77c6ca12e347286ba5 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 30 Oct 2024 12:02:57 -0700 Subject: [PATCH 12/18] retry- remove other tests to isolate --- .../pytest/pytestExecutionAdapter.ts | 1 + .../testing/common/testingAdapter.test.ts | 1214 ++++++++--------- 2 files changed, 608 insertions(+), 607 deletions(-) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 49745b06c72f..0e90861f2eab 100644 --- 
a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -139,6 +139,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { resource: uri, interpreter, }; + console.log('EJFB before createActivatedEnvironment', executionFactory); // need to check what will happen in the exec service is NOT defined and is null const execService = await executionFactory?.createActivatedEnvironment(creationOptions); console.log('EJFB after exec service'); diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 6b6c65ade613..80d6cb564e92 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -389,563 +389,563 @@ suite('End to End Tests: test adapters', () => { assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); }); - test('pytest discovery adapter small workspace with symlink', async () => { - if (os.platform() === 'win32') { - console.log('Skipping test for windows'); - return; - } + // test('pytest discovery adapter small workspace with symlink', async () => { + // if (os.platform() === 'win32') { + // console.log('Skipping test for windows'); + // return; + // } - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - // set workspace to test workspace folder - const testSimpleSymlinkPath = path.join(rootPathDiscoverySymlink, 'test_simple.py'); - workspaceUri = Uri.parse(rootPathDiscoverySymlink); - const stats = fs.lstatSync(rootPathDiscoverySymlink); + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // // set workspace to test workspace folder + // const testSimpleSymlinkPath = path.join(rootPathDiscoverySymlink, 'test_simple.py'); + // workspaceUri = Uri.parse(rootPathDiscoverySymlink); + // const stats = fs.lstatSync(rootPathDiscoverySymlink); - // confirm that the path is a symbolic link - assert.ok(stats.isSymbolicLink(), 'The path is not a symbolic link but must be for this test.'); + // // confirm that the path is a symbolic link + // assert.ok(stats.isSymbolicLink(), 'The path is not a symbolic link but must be for this test.'); - resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - // run pytest discovery - const discoveryAdapter = new PytestTestDiscoveryAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - configService.getSettings(workspaceUri).testing.pytestArgs = []; + // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + // // run pytest discovery + // const discoveryAdapter = new PytestTestDiscoveryAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // configService.getSettings(workspaceUri).testing.pytestArgs = []; - await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // verification after discovery is complete + // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // // verification after discovery is complete - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); // 2. Confirm no errors - assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); - // 4. Confirm that the cwd returned is the symlink path and the test's path is also using the symlink as the root - if (process.platform === 'win32') { - // covert string to lowercase for windows as the path is case insensitive - traceLog('windows machine detected, converting path to lowercase for comparison'); - const a = actualData.cwd.toLowerCase(); - const b = rootPathDiscoverySymlink.toLowerCase(); - const testSimpleActual = (actualData.tests as { - children: { - path: string; - }[]; - }).children[0].path.toLowerCase(); - const testSimpleExpected = testSimpleSymlinkPath.toLowerCase(); - assert.strictEqual(a, b, `Expected cwd to be the symlink path actual: ${a} expected: ${b}`); - assert.strictEqual( - testSimpleActual, - testSimpleExpected, - `Expected test path to be the symlink path actual: ${testSimpleActual} expected: ${testSimpleExpected}`, - ); - } else { - assert.strictEqual( - path.join(actualData.cwd), - path.join(rootPathDiscoverySymlink), - 'Expected cwd to be the symlink path, check for non-windows machines', - ); - assert.strictEqual( - (actualData.tests as { - children: { - path: string; - }[]; - }).children[0].path, - testSimpleSymlinkPath, - 'Expected test path to be the symlink path, check for non windows machines', - ); - } + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); // 2. Confirm no errors + // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); + // // 4. 
Confirm that the cwd returned is the symlink path and the test's path is also using the symlink as the root + // if (process.platform === 'win32') { + // // covert string to lowercase for windows as the path is case insensitive + // traceLog('windows machine detected, converting path to lowercase for comparison'); + // const a = actualData.cwd.toLowerCase(); + // const b = rootPathDiscoverySymlink.toLowerCase(); + // const testSimpleActual = (actualData.tests as { + // children: { + // path: string; + // }[]; + // }).children[0].path.toLowerCase(); + // const testSimpleExpected = testSimpleSymlinkPath.toLowerCase(); + // assert.strictEqual(a, b, `Expected cwd to be the symlink path actual: ${a} expected: ${b}`); + // assert.strictEqual( + // testSimpleActual, + // testSimpleExpected, + // `Expected test path to be the symlink path actual: ${testSimpleActual} expected: ${testSimpleExpected}`, + // ); + // } else { + // assert.strictEqual( + // path.join(actualData.cwd), + // path.join(rootPathDiscoverySymlink), + // 'Expected cwd to be the symlink path, check for non-windows machines', + // ); + // assert.strictEqual( + // (actualData.tests as { + // children: { + // path: string; + // }[]; + // }).children[0].path, + // testSimpleSymlinkPath, + // 'Expected test path to be the symlink path, check for non windows machines', + // ); + // } - // 5. Confirm that resolveDiscovery was called once - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - test('pytest discovery adapter large workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - // run pytest discovery - const discoveryAdapter = new PytestTestDiscoveryAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); + // // 5. Confirm that resolveDiscovery was called once + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + // test('pytest discovery adapter large workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + // // run pytest discovery + // const discoveryAdapter = new PytestTestDiscoveryAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - configService.getSettings(workspaceUri).testing.pytestArgs = []; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // configService.getSettings(workspaceUri).testing.pytestArgs = []; - await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // verification after discovery is complete - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); // 2. Confirm no errors - assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); + // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // // verification after discovery is complete + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); // 2. Confirm no errors + // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - test('unittest execution adapter small workspace with correct output', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - assert.strictEqual( - payload.status, - 'success', - `Expected status to be 'success', instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? (err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + // test('unittest execution adapter small workspace with correct output', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. 
+ // try { + // assert.strictEqual( + // payload.status, + // 'success', + // `Expected status to be 'success', instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? (err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathSmallWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // run execution - const executionAdapter = new UnittestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - let collectedOutput = ''; - testRun - .setup((t) => t.appendOutput(typeMoq.It.isAny())) - .callback((output: string) => { - collectedOutput += output; - traceLog('appendOutput was called with:', output); - }) - .returns(() => false); - await executionAdapter - .runTests( - workspaceUri, - ['test_simple.SimpleClass.test_simple_unit'], - TestRunProfileKind.Run, - testRun.object, - pythonExecFactory, - ) - .finally(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // run execution + // const executionAdapter = new UnittestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests( + // workspaceUri, + // ['test_simple.SimpleClass.test_simple_unit'], + // TestRunProfileKind.Run, + // testRun.object, + // pythonExecFactory, + // ) + // .finally(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); - // verify output works for stdout and stderr as well as unittest output - assert.ok( - collectedOutput.includes('expected printed output, stdout'), - 'The test string does not contain the expected stdout output.', - ); - assert.ok( - collectedOutput.includes('expected printed output, stderr'), - 'The test string does not contain the expected stderr output.', - ); - assert.ok( - collectedOutput.includes('Ran 1 test in'), - 'The test string does not contain the expected unittest output.', - ); - }); - }); - test('unittest execution adapter large workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let 
failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - const validStatuses = ['subtest-success', 'subtest-failure']; - assert.ok( - validStatuses.includes(payload.status), - `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? (err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; + // // verify output works for stdout and stderr as well as unittest output + // assert.ok( + // collectedOutput.includes('expected printed output, stdout'), + // 'The test string does not contain the expected stdout output.', + // ); + // assert.ok( + // collectedOutput.includes('expected printed output, stderr'), + // 'The test string does not contain the expected stderr output.', + // ); + // assert.ok( + // collectedOutput.includes('Ran 1 test in'), + // 'The test string does not contain the expected unittest output.', + // ); + // }); + // }); + // test('unittest execution adapter large workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // const validStatuses = ['subtest-success', 'subtest-failure']; + // assert.ok( + // validStatuses.includes(payload.status), + // `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // run unittest execution - const executionAdapter = new UnittestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - let collectedOutput = ''; - testRun - .setup((t) => t.appendOutput(typeMoq.It.isAny())) - .callback((output: string) => { - collectedOutput += output; - traceLog('appendOutput was called with:', output); - }) - .returns(() => false); - await executionAdapter - .runTests( - workspaceUri, - ['test_parameterized_subtest.NumbersTest.test_even'], - TestRunProfileKind.Run, - testRun.object, - pythonExecFactory, - ) - .then(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); + // // run unittest execution + // const executionAdapter = new UnittestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests( + // workspaceUri, + // ['test_parameterized_subtest.NumbersTest.test_even'], + // TestRunProfileKind.Run, + // testRun.object, + // pythonExecFactory, + // ) + // .then(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); - // verify output - assert.ok( - collectedOutput.includes('test_parameterized_subtest.py'), - 'The test string does not contain the correct test name which should be printed', - ); - assert.ok( - collectedOutput.includes('FAILED (failures=1000)'), - 'The test string does not contain the last of the unittest output', - ); - }); - }); - test('pytest execution adapter small workspace with correct output', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. 
- try { - assert.strictEqual( - payload.status, - 'success', - `Expected status to be 'success', instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? (err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathSmallWorkspace); - configService.getSettings(workspaceUri).testing.pytestArgs = []; + // // verify output + // assert.ok( + // collectedOutput.includes('test_parameterized_subtest.py'), + // 'The test string does not contain the correct test name which should be printed', + // ); + // assert.ok( + // collectedOutput.includes('FAILED (failures=1000)'), + // 'The test string does not contain the last of the unittest output', + // ); + // }); + // }); + // test('pytest execution adapter small workspace with correct output', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // assert.strictEqual( + // payload.status, + // 'success', + // `Expected status to be 'success', instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? (err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // configService.getSettings(workspaceUri).testing.pytestArgs = []; - // run pytest execution - const executionAdapter = new PytestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - let collectedOutput = ''; - testRun - .setup((t) => t.appendOutput(typeMoq.It.isAny())) - .callback((output: string) => { - collectedOutput += output; - traceLog('appendOutput was called with:', output); - }) - .returns(() => false); - await executionAdapter - .runTests( - workspaceUri, - [`${rootPathSmallWorkspace}/test_simple.py::test_a`], - TestRunProfileKind.Run, - testRun.object, - pythonExecFactory, - ) - .then(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); + // // run pytest execution + // const executionAdapter = new PytestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', 
output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests( + // workspaceUri, + // [`${rootPathSmallWorkspace}/test_simple.py::test_a`], + // TestRunProfileKind.Run, + // testRun.object, + // pythonExecFactory, + // ) + // .then(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); - // verify output works for stdout and stderr as well as pytest output - assert.ok( - collectedOutput.includes('test session starts'), - 'The test string does not contain the expected stdout output.', - ); - assert.ok( - collectedOutput.includes('Captured log call'), - 'The test string does not contain the expected log section.', - ); - const searchStrings = [ - 'This is a warning message.', - 'This is an error message.', - 'This is a critical message.', - ]; - let searchString: string; - for (searchString of searchStrings) { - const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; - assert.strictEqual( - count, - 2, - `The test string does not contain two instances of ${searchString}. Should appear twice from logging output and stack trace`, - ); - } - }); - }); + // // verify output works for stdout and stderr as well as pytest output + // assert.ok( + // collectedOutput.includes('test session starts'), + // 'The test string does not contain the expected stdout output.', + // ); + // assert.ok( + // collectedOutput.includes('Captured log call'), + // 'The test string does not contain the expected log section.', + // ); + // const searchStrings = [ + // 'This is a warning message.', + // 'This is an error message.', + // 'This is a critical message.', + // ]; + // let searchString: string; + // for (searchString of searchStrings) { + // const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; + // assert.strictEqual( + // count, + // 2, + // `The test string does not contain two instances of ${searchString}. Should appear twice from logging output and stack trace`, + // ); + // } + // }); + // }); - test('Unittest execution with coverage, small workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - resultResolver._resolveCoverage = async (payload, _token?) => { - assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder'); - assert.ok(payload.result, 'Expected results to be present'); - const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`]; - assert.ok(simpleFileCov, 'Expected test_simple.py coverage to be present'); - // since only one test was run, the other test in the same file will have missed coverage lines - assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 1 line to be covered in even.py'); - assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py'); - return Promise.resolve(); - }; + // test('Unittest execution with coverage, small workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // resultResolver._resolveCoverage = async (payload, _token?) 
=> { + // assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder'); + // assert.ok(payload.result, 'Expected results to be present'); + // const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`]; + // assert.ok(simpleFileCov, 'Expected test_simple.py coverage to be present'); + // // since only one test was run, the other test in the same file will have missed coverage lines + // assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 1 line to be covered in even.py'); + // assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py'); + // return Promise.resolve(); + // }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathCoverageWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // run execution - const executionAdapter = new UnittestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - let collectedOutput = ''; - testRun - .setup((t) => t.appendOutput(typeMoq.It.isAny())) - .callback((output: string) => { - collectedOutput += output; - traceLog('appendOutput was called with:', output); - }) - .returns(() => false); - await executionAdapter - .runTests( - workspaceUri, - ['test_even.TestNumbers.test_odd'], - TestRunProfileKind.Coverage, - testRun.object, - pythonExecFactory, - ) - .finally(() => { - assert.ok(collectedOutput, 'expect output to be collected'); - }); - }); - test('pytest coverage execution, small workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - resultResolver._resolveCoverage = async (payload, _runInstance?) 
=> { - assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder'); - assert.ok(payload.result, 'Expected results to be present'); - const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`]; - assert.ok(simpleFileCov, 'Expected test_simple.py coverage to be present'); - // since only one test was run, the other test in the same file will have missed coverage lines - assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 1 line to be covered in even.py'); - assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py'); + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathCoverageWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // run execution + // const executionAdapter = new UnittestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests( + // workspaceUri, + // ['test_even.TestNumbers.test_odd'], + // TestRunProfileKind.Coverage, + // testRun.object, + // pythonExecFactory, + // ) + // .finally(() => { + // assert.ok(collectedOutput, 'expect output to be collected'); + // }); + // }); + // test('pytest coverage execution, small workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + // resultResolver._resolveCoverage = async (payload, _runInstance?) 
=> { + // assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder'); + // assert.ok(payload.result, 'Expected results to be present'); + // const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`]; + // assert.ok(simpleFileCov, 'Expected test_simple.py coverage to be present'); + // // since only one test was run, the other test in the same file will have missed coverage lines + // assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 1 line to be covered in even.py'); + // assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py'); - return Promise.resolve(); - }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathCoverageWorkspace); - configService.getSettings(workspaceUri).testing.pytestArgs = []; + // return Promise.resolve(); + // }; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathCoverageWorkspace); + // configService.getSettings(workspaceUri).testing.pytestArgs = []; - // run pytest execution - const executionAdapter = new PytestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - let collectedOutput = ''; - testRun - .setup((t) => t.appendOutput(typeMoq.It.isAny())) - .callback((output: string) => { - collectedOutput += output; - traceLog('appendOutput was called with:', output); - }) - .returns(() => false); - await executionAdapter - .runTests( - workspaceUri, - [`${rootPathCoverageWorkspace}/test_even.py::TestNumbers::test_odd`], - TestRunProfileKind.Coverage, - testRun.object, - pythonExecFactory, - ) - .then(() => { - assert.ok(collectedOutput, 'expect output to be collected'); - }); - }); - test('pytest execution adapter large workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - assert.strictEqual( - payload.status, - 'success', - `Expected status to be 'success', instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? 
(err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; + // // run pytest execution + // const executionAdapter = new PytestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests( + // workspaceUri, + // [`${rootPathCoverageWorkspace}/test_even.py::TestNumbers::test_odd`], + // TestRunProfileKind.Coverage, + // testRun.object, + // pythonExecFactory, + // ) + // .then(() => { + // assert.ok(collectedOutput, 'expect output to be collected'); + // }); + // }); + // test('pytest execution adapter large workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // assert.strictEqual( + // payload.status, + // 'success', + // `Expected status to be 'success', instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - configService.getSettings(workspaceUri).testing.pytestArgs = []; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // configService.getSettings(workspaceUri).testing.pytestArgs = []; - // generate list of test_ids - const testIds: string[] = []; - for (let i = 0; i < 2000; i = i + 1) { - const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; - testIds.push(testId); - } + // // generate list of test_ids + // const testIds: string[] = []; + // for (let i = 0; i < 2000; i = i + 1) { + // const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; + // testIds.push(testId); + // } - // run pytest execution - const executionAdapter = new PytestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - let collectedOutput = ''; - testRun - .setup((t) => t.appendOutput(typeMoq.It.isAny())) - .callback((output: string) => { - collectedOutput += output; - traceLog('appendOutput was called with:', output); - }) - .returns(() => false); - await executionAdapter - .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory) - .then(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); + // // run pytest execution + // const executionAdapter = new PytestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory) + // .then(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); - // verify output works for large repo - assert.ok( - collectedOutput.includes('test session starts'), - 'The test string does not contain the expected stdout output from pytest.', - ); - }); - }); + // // verify output works for large repo + // assert.ok( + // collectedOutput.includes('test session starts'), + // 'The test string does not contain the expected stdout output from pytest.', + // ); + // }); + // }); test('unittest discovery adapter seg fault error handling', async () => { resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; @@ -1062,83 +1062,83 @@ suite('End to End Tests: test adapters', () => { assert.strictEqual(failureOccurred, false, 
failureMsg); }); }); - test('unittest execution adapter seg fault error handling', async () => { - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (data, _token?) => { - // do the following asserts for each time resolveExecution is called, should be called once per test. - callCount = callCount + 1; - traceLog(`unittest execution adapter seg fault error handling \n ${JSON.stringify(data)}`); - try { - if (data.status === 'error') { - if (data.error === undefined) { - // Dereference a NULL pointer - const indexOfTest = JSON.stringify(data).search('Dereference a NULL pointer'); - if (indexOfTest === -1) { - failureOccurred = true; - failureMsg = 'Expected test to have a null pointer'; - } - } else if (data.error.length === 0) { - failureOccurred = true; - failureMsg = "Expected errors in 'error' field"; - } - } else { - const indexOfTest = JSON.stringify(data.result).search('error'); - if (indexOfTest === -1) { - failureOccurred = true; - failureMsg = - 'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.'; - } - } - if (data.result === undefined) { - failureOccurred = true; - failureMsg = 'Expected results to be present'; - } - // make sure the testID is found in the results - const indexOfTest = JSON.stringify(data).search('test_seg_fault.TestSegmentationFault.test_segfault'); - if (indexOfTest === -1) { - failureOccurred = true; - failureMsg = 'Expected testId to be present'; - } - } catch (err) { - failureMsg = err ? (err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; + // test('unittest execution adapter seg fault error handling', async () => { + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (data, _token?) => { + // // do the following asserts for each time resolveExecution is called, should be called once per test. + // callCount = callCount + 1; + // traceLog(`unittest execution adapter seg fault error handling \n ${JSON.stringify(data)}`); + // try { + // if (data.status === 'error') { + // if (data.error === undefined) { + // // Dereference a NULL pointer + // const indexOfTest = JSON.stringify(data).search('Dereference a NULL pointer'); + // if (indexOfTest === -1) { + // failureOccurred = true; + // failureMsg = 'Expected test to have a null pointer'; + // } + // } else if (data.error.length === 0) { + // failureOccurred = true; + // failureMsg = "Expected errors in 'error' field"; + // } + // } else { + // const indexOfTest = JSON.stringify(data.result).search('error'); + // if (indexOfTest === -1) { + // failureOccurred = true; + // failureMsg = + // 'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.'; + // } + // } + // if (data.result === undefined) { + // failureOccurred = true; + // failureMsg = 'Expected results to be present'; + // } + // // make sure the testID is found in the results + // const indexOfTest = JSON.stringify(data).search('test_seg_fault.TestSegmentationFault.test_segfault'); + // if (indexOfTest === -1) { + // failureOccurred = true; + // failureMsg = 'Expected testId to be present'; + // } + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; - const testId = `test_seg_fault.TestSegmentationFault.test_segfault`; - const testIds: string[] = [testId]; + // const testId = `test_seg_fault.TestSegmentationFault.test_segfault`; + // const testIds: string[] = [testId]; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathErrorWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathErrorWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // run pytest execution - const executionAdapter = new UnittestTestExecutionAdapter( - configService, - testOutputChannel.object, - resultResolver, - envVarsService, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - await executionAdapter - .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory) - .finally(() => { - assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); - }); - }); + // // run pytest execution + // const executionAdapter = new UnittestTestExecutionAdapter( + // configService, + // testOutputChannel.object, + // resultResolver, + // envVarsService, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // await executionAdapter + // .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory) + // .finally(() => { + // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); + // }); + // }); test('pytest execution adapter seg fault error handling', async () => { resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); let callCount = 0; From 6e8e7770aed789b81dfba13e50b54390a13dd849 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 30 Oct 2024 13:05:48 -0700 Subject: [PATCH 13/18] linting issue --- src/test/testing/common/testingAdapter.test.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 80d6cb564e92..d1e7e55c7d11 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -17,7 +17,7 @@ import { EXTENSION_ROOT_DIR_FOR_TESTS, initialize } from '../../initialize'; import { traceError, traceLog } from '../../../client/logging'; import { PytestTestExecutionAdapter } from '../../../client/testing/testController/pytest/pytestExecutionAdapter'; import { UnittestTestDiscoveryAdapter } from '../../../client/testing/testController/unittest/testDiscoveryAdapter'; -import { UnittestTestExecutionAdapter } from '../../../client/testing/testController/unittest/testExecutionAdapter'; +// import { UnittestTestExecutionAdapter } from '../../../client/testing/testController/unittest/testExecutionAdapter'; import { PythonResultResolver } from '../../../client/testing/testController/common/resultResolver'; import { TestProvider } from 
'../../../client/testing/types'; import { PYTEST_PROVIDER, UNITTEST_PROVIDER } from '../../../client/testing/common/constants'; @@ -74,12 +74,12 @@ suite('End to End Tests: test adapters', () => { 'testTestingRootWkspc', 'symlink_parent-folder', ); - const rootPathCoverageWorkspace = path.join( - EXTENSION_ROOT_DIR_FOR_TESTS, - 'src', - 'testTestingRootWkspc', - 'coverageWorkspace', - ); + // const rootPathCoverageWorkspace = path.join( + // EXTENSION_ROOT_DIR_FOR_TESTS, + // 'src', + // 'testTestingRootWkspc', + // 'coverageWorkspace', + // ); suiteSetup(async () => { serviceContainer = (await initialize()).serviceContainer; From 69ab0b84970852806aa63e0a0b70452f954a3f72 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 30 Oct 2024 13:32:16 -0700 Subject: [PATCH 14/18] moving service container instantiation --- .../testing/common/testingAdapter.test.ts | 1085 ++++++++--------- 1 file changed, 542 insertions(+), 543 deletions(-) diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index d1e7e55c7d11..6d6d56ce3468 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -17,7 +17,7 @@ import { EXTENSION_ROOT_DIR_FOR_TESTS, initialize } from '../../initialize'; import { traceError, traceLog } from '../../../client/logging'; import { PytestTestExecutionAdapter } from '../../../client/testing/testController/pytest/pytestExecutionAdapter'; import { UnittestTestDiscoveryAdapter } from '../../../client/testing/testController/unittest/testDiscoveryAdapter'; -// import { UnittestTestExecutionAdapter } from '../../../client/testing/testController/unittest/testExecutionAdapter'; +import { UnittestTestExecutionAdapter } from '../../../client/testing/testController/unittest/testExecutionAdapter'; import { PythonResultResolver } from '../../../client/testing/testController/common/resultResolver'; import { TestProvider } from '../../../client/testing/types'; import { PYTEST_PROVIDER, UNITTEST_PROVIDER } from '../../../client/testing/common/constants'; @@ -74,15 +74,13 @@ suite('End to End Tests: test adapters', () => { 'testTestingRootWkspc', 'symlink_parent-folder', ); - // const rootPathCoverageWorkspace = path.join( - // EXTENSION_ROOT_DIR_FOR_TESTS, - // 'src', - // 'testTestingRootWkspc', - // 'coverageWorkspace', - // ); + const rootPathCoverageWorkspace = path.join( + EXTENSION_ROOT_DIR_FOR_TESTS, + 'src', + 'testTestingRootWkspc', + 'coverageWorkspace', + ); suiteSetup(async () => { - serviceContainer = (await initialize()).serviceContainer; - // create symlink for specific symlink test const target = rootPathSmallWorkspace; const dest = rootPathDiscoverySymlink; @@ -107,6 +105,7 @@ suite('End to End Tests: test adapters', () => { }); setup(async () => { + serviceContainer = (await initialize()).serviceContainer; getPixiStub = sinon.stub(pixi, 'getPixi'); getPixiStub.resolves(undefined); @@ -389,563 +388,563 @@ suite('End to End Tests: test adapters', () => { assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); }); - // test('pytest discovery adapter small workspace with symlink', async () => { - // if (os.platform() === 'win32') { - // console.log('Skipping test for windows'); - // return; - // } - - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // // set workspace to test workspace folder - // const 
testSimpleSymlinkPath = path.join(rootPathDiscoverySymlink, 'test_simple.py'); - // workspaceUri = Uri.parse(rootPathDiscoverySymlink); - // const stats = fs.lstatSync(rootPathDiscoverySymlink); + test('pytest discovery adapter small workspace with symlink', async () => { + if (os.platform() === 'win32') { + console.log('Skipping test for windows'); + return; + } - // // confirm that the path is a symbolic link - // assert.ok(stats.isSymbolicLink(), 'The path is not a symbolic link but must be for this test.'); + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + // set workspace to test workspace folder + const testSimpleSymlinkPath = path.join(rootPathDiscoverySymlink, 'test_simple.py'); + workspaceUri = Uri.parse(rootPathDiscoverySymlink); + const stats = fs.lstatSync(rootPathDiscoverySymlink); - // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - // // run pytest discovery - // const discoveryAdapter = new PytestTestDiscoveryAdapter( - // configService, - // testOutputChannel.object, - // resultResolver, - // envVarsService, - // ); - // configService.getSettings(workspaceUri).testing.pytestArgs = []; + // confirm that the path is a symbolic link + assert.ok(stats.isSymbolicLink(), 'The path is not a symbolic link but must be for this test.'); - // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // // verification after discovery is complete + resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + // run pytest discovery + const discoveryAdapter = new PytestTestDiscoveryAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); + configService.getSettings(workspaceUri).testing.pytestArgs = []; - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); // 2. Confirm no errors - // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); - // // 4. 
Confirm that the cwd returned is the symlink path and the test's path is also using the symlink as the root - // if (process.platform === 'win32') { - // // covert string to lowercase for windows as the path is case insensitive - // traceLog('windows machine detected, converting path to lowercase for comparison'); - // const a = actualData.cwd.toLowerCase(); - // const b = rootPathDiscoverySymlink.toLowerCase(); - // const testSimpleActual = (actualData.tests as { - // children: { - // path: string; - // }[]; - // }).children[0].path.toLowerCase(); - // const testSimpleExpected = testSimpleSymlinkPath.toLowerCase(); - // assert.strictEqual(a, b, `Expected cwd to be the symlink path actual: ${a} expected: ${b}`); - // assert.strictEqual( - // testSimpleActual, - // testSimpleExpected, - // `Expected test path to be the symlink path actual: ${testSimpleActual} expected: ${testSimpleExpected}`, - // ); - // } else { - // assert.strictEqual( - // path.join(actualData.cwd), - // path.join(rootPathDiscoverySymlink), - // 'Expected cwd to be the symlink path, check for non-windows machines', - // ); - // assert.strictEqual( - // (actualData.tests as { - // children: { - // path: string; - // }[]; - // }).children[0].path, - // testSimpleSymlinkPath, - // 'Expected test path to be the symlink path, check for non windows machines', - // ); - // } + await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // verification after discovery is complete - // // 5. Confirm that resolveDiscovery was called once - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - // test('pytest discovery adapter large workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - // // run pytest discovery - // const discoveryAdapter = new PytestTestDiscoveryAdapter( - // configService, - // testOutputChannel.object, - // resultResolver, - // envVarsService, - // ); + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); // 2. Confirm no errors + assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); + // 4. 
Confirm that the cwd returned is the symlink path and the test's path is also using the symlink as the root + if (process.platform === 'win32') { + // covert string to lowercase for windows as the path is case insensitive + traceLog('windows machine detected, converting path to lowercase for comparison'); + const a = actualData.cwd.toLowerCase(); + const b = rootPathDiscoverySymlink.toLowerCase(); + const testSimpleActual = (actualData.tests as { + children: { + path: string; + }[]; + }).children[0].path.toLowerCase(); + const testSimpleExpected = testSimpleSymlinkPath.toLowerCase(); + assert.strictEqual(a, b, `Expected cwd to be the symlink path actual: ${a} expected: ${b}`); + assert.strictEqual( + testSimpleActual, + testSimpleExpected, + `Expected test path to be the symlink path actual: ${testSimpleActual} expected: ${testSimpleExpected}`, + ); + } else { + assert.strictEqual( + path.join(actualData.cwd), + path.join(rootPathDiscoverySymlink), + 'Expected cwd to be the symlink path, check for non-windows machines', + ); + assert.strictEqual( + (actualData.tests as { + children: { + path: string; + }[]; + }).children[0].path, + testSimpleSymlinkPath, + 'Expected test path to be the symlink path, check for non windows machines', + ); + } - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // configService.getSettings(workspaceUri).testing.pytestArgs = []; + // 5. Confirm that resolveDiscovery was called once + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + test('pytest discovery adapter large workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + // run pytest discovery + const discoveryAdapter = new PytestTestDiscoveryAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); - // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // // verification after discovery is complete - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); // 2. Confirm no errors - // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + configService.getSettings(workspaceUri).testing.pytestArgs = []; - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - // test('unittest execution adapter small workspace with correct output', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) 
=> { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // assert.strictEqual( - // payload.status, - // 'success', - // `Expected status to be 'success', instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? (err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; + await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // verification after discovery is complete + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); // 2. Confirm no errors + assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // // run execution - // const executionAdapter = new UnittestTestExecutionAdapter( - // configService, - // testOutputChannel.object, - // resultResolver, - // envVarsService, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // }) - // .returns(() => false); - // await executionAdapter - // .runTests( - // workspaceUri, - // ['test_simple.SimpleClass.test_simple_unit'], - // TestRunProfileKind.Run, - // testRun.object, - // pythonExecFactory, - // ) - // .finally(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + test('unittest execution adapter small workspace with correct output', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + assert.strictEqual( + payload.status, + 'success', + `Expected status to be 'success', instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; - // // verify output works for stdout and stderr as well as unittest output - // assert.ok( - // collectedOutput.includes('expected printed output, stdout'), - // 'The test string does not contain the expected stdout output.', - // ); - // assert.ok( - // collectedOutput.includes('expected printed output, stderr'), - // 'The test string does not contain the expected stderr output.', - // ); - // assert.ok( - // collectedOutput.includes('Ran 1 test in'), - // 'The test string does not contain the expected unittest output.', - // ); - // }); - // }); - // test('unittest execution adapter large workspace', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // const validStatuses = ['subtest-success', 'subtest-failure']; - // assert.ok( - // validStatuses.includes(payload.status), - // `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? (err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathSmallWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // run execution + const executionAdapter = new UnittestTestExecutionAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests( + workspaceUri, + ['test_simple.SimpleClass.test_simple_unit'], + TestRunProfileKind.Run, + testRun.object, + pythonExecFactory, + ) + .finally(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // verify output works for stdout and stderr as well as unittest output + assert.ok( + collectedOutput.includes('expected printed output, stdout'), + 'The test string does not contain the expected stdout output.', + ); + assert.ok( + collectedOutput.includes('expected printed output, stderr'), + 'The test string does not contain the expected stderr output.', + ); + assert.ok( + collectedOutput.includes('Ran 1 test in'), + 'The test string does not contain the expected unittest output.', + ); + }); + }); + test('unittest 
execution adapter large workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + const validStatuses = ['subtest-success', 'subtest-failure']; + assert.ok( + validStatuses.includes(payload.status), + `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? (err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; - // // run unittest execution - // const executionAdapter = new UnittestTestExecutionAdapter( - // configService, - // testOutputChannel.object, - // resultResolver, - // envVarsService, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // }) - // .returns(() => false); - // await executionAdapter - // .runTests( - // workspaceUri, - // ['test_parameterized_subtest.NumbersTest.test_even'], - // TestRunProfileKind.Run, - // testRun.object, - // pythonExecFactory, - // ) - // .then(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // // verify output - // assert.ok( - // collectedOutput.includes('test_parameterized_subtest.py'), - // 'The test string does not contain the correct test name which should be printed', - // ); - // assert.ok( - // collectedOutput.includes('FAILED (failures=1000)'), - // 'The test string does not contain the last of the unittest output', - // ); - // }); - // }); - // test('pytest execution adapter small workspace with correct output', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // assert.strictEqual( - // payload.status, - // 'success', - // `Expected status to be 'success', instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? 
(err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // configService.getSettings(workspaceUri).testing.pytestArgs = []; + // run unittest execution + const executionAdapter = new UnittestTestExecutionAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests( + workspaceUri, + ['test_parameterized_subtest.NumbersTest.test_even'], + TestRunProfileKind.Run, + testRun.object, + pythonExecFactory, + ) + .then(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); - // // run pytest execution - // const executionAdapter = new PytestTestExecutionAdapter( - // configService, - // testOutputChannel.object, - // resultResolver, - // envVarsService, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // }) - // .returns(() => false); - // await executionAdapter - // .runTests( - // workspaceUri, - // [`${rootPathSmallWorkspace}/test_simple.py::test_a`], - // TestRunProfileKind.Run, - // testRun.object, - // pythonExecFactory, - // ) - // .then(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); + // verify output + assert.ok( + collectedOutput.includes('test_parameterized_subtest.py'), + 'The test string does not contain the correct test name which should be printed', + ); + assert.ok( + collectedOutput.includes('FAILED (failures=1000)'), + 'The test string does not contain the last of the unittest output', + ); + }); + }); + test('pytest execution adapter small workspace with correct output', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + assert.strictEqual( + payload.status, + 'success', + `Expected status to be 'success', instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathSmallWorkspace); + configService.getSettings(workspaceUri).testing.pytestArgs = []; - // // verify output works for stdout and stderr as well as pytest output - // assert.ok( - // collectedOutput.includes('test session starts'), - // 'The test string does not contain the expected stdout output.', - // ); - // assert.ok( - // collectedOutput.includes('Captured log call'), - // 'The test string does not contain the expected log section.', - // ); - // const searchStrings = [ - // 'This is a warning message.', - // 'This is an error message.', - // 'This is a critical message.', - // ]; - // let searchString: string; - // for (searchString of searchStrings) { - // const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; - // assert.strictEqual( - // count, - // 2, - // `The test string does not contain two instances of ${searchString}. Should appear twice from logging output and stack trace`, - // ); - // } - // }); - // }); + // run pytest execution + const executionAdapter = new PytestTestExecutionAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests( + workspaceUri, + [`${rootPathSmallWorkspace}/test_simple.py::test_a`], + TestRunProfileKind.Run, + testRun.object, + pythonExecFactory, + ) + .then(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); - // test('Unittest execution with coverage, small workspace', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // resultResolver._resolveCoverage = async (payload, _token?) 
=> {
+            assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder');
+            assert.ok(payload.result, 'Expected results to be present');
+            const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`];
+            assert.ok(simpleFileCov, 'Expected even.py coverage to be present');
+            // since only one test was run, the other test in the same file will have missed coverage lines
+            assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 3 lines to be covered in even.py');
+            assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 1 line to be missed in even.py');
+            return Promise.resolve();
+        };
+
+        // set workspace to test workspace folder
+        workspaceUri = Uri.parse(rootPathCoverageWorkspace);
+        configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py'];
+        // run execution
+        const executionAdapter = new UnittestTestExecutionAdapter(
+            configService,
+            testOutputChannel.object,
+            resultResolver,
+            envVarsService,
+        );
+        const testRun = typeMoq.Mock.ofType<TestRun>();
+        testRun
+            .setup((t) => t.token)
+            .returns(
+                () =>
+                    ({
+                        onCancellationRequested: () => undefined,
+                    } as any),
+            );
+        let collectedOutput = '';
+        testRun
+            .setup((t) => t.appendOutput(typeMoq.It.isAny()))
+            .callback((output: string) => {
+                collectedOutput += output;
+                traceLog('appendOutput was called with:', output);
+            })
+            .returns(() => false);
+        await executionAdapter
+            .runTests(
+                workspaceUri,
+                ['test_even.TestNumbers.test_odd'],
+                TestRunProfileKind.Coverage,
+                testRun.object,
+                pythonExecFactory,
+            )
+            .finally(() => {
+                assert.ok(collectedOutput, 'expect output to be collected');
+            });
+    });
-    // test('pytest coverage execution, small workspace', async () => {
-    //     // result resolver and saved data for assertions
-    //     resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
-    //     resultResolver._resolveCoverage = async (payload, _runInstance?) 
=> { - // assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder'); - // assert.ok(payload.result, 'Expected results to be present'); - // const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`]; - // assert.ok(simpleFileCov, 'Expected test_simple.py coverage to be present'); - // // since only one test was run, the other test in the same file will have missed coverage lines - // assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 1 line to be covered in even.py'); - // assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py'); + test('Unittest execution with coverage, small workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + resultResolver._resolveCoverage = async (payload, _token?) => { + assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder'); + assert.ok(payload.result, 'Expected results to be present'); + const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`]; + assert.ok(simpleFileCov, 'Expected test_simple.py coverage to be present'); + // since only one test was run, the other test in the same file will have missed coverage lines + assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 1 line to be covered in even.py'); + assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py'); + return Promise.resolve(); + }; - // return Promise.resolve(); - // }; - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathCoverageWorkspace); - // configService.getSettings(workspaceUri).testing.pytestArgs = []; + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathCoverageWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // run execution + const executionAdapter = new UnittestTestExecutionAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests( + workspaceUri, + ['test_even.TestNumbers.test_odd'], + TestRunProfileKind.Coverage, + testRun.object, + pythonExecFactory, + ) + .finally(() => { + assert.ok(collectedOutput, 'expect output to be collected'); + }); + }); + test('pytest coverage execution, small workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + resultResolver._resolveCoverage = async (payload, _runInstance?) 
=> {
+            assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder');
+            assert.ok(payload.result, 'Expected results to be present');
+            const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`];
+            assert.ok(simpleFileCov, 'Expected even.py coverage to be present');
+            // since only one test was run, the other test in the same file will have missed coverage lines
+            assert.strictEqual(simpleFileCov.lines_covered.length, 3, 'Expected 3 lines to be covered in even.py');
+            assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 1 line to be missed in even.py');
+
+            return Promise.resolve();
+        };
+        // set workspace to test workspace folder
+        workspaceUri = Uri.parse(rootPathCoverageWorkspace);
+        configService.getSettings(workspaceUri).testing.pytestArgs = [];
+
+        // run pytest execution
+        const executionAdapter = new PytestTestExecutionAdapter(
+            configService,
+            testOutputChannel.object,
+            resultResolver,
+            envVarsService,
+        );
+        const testRun = typeMoq.Mock.ofType<TestRun>();
+        testRun
+            .setup((t) => t.token)
+            .returns(
+                () =>
+                    ({
+                        onCancellationRequested: () => undefined,
+                    } as any),
+            );
+        let collectedOutput = '';
+        testRun
+            .setup((t) => t.appendOutput(typeMoq.It.isAny()))
+            .callback((output: string) => {
+                collectedOutput += output;
+                traceLog('appendOutput was called with:', output);
+            })
+            .returns(() => false);
+        await executionAdapter
+            .runTests(
+                workspaceUri,
+                [`${rootPathCoverageWorkspace}/test_even.py::TestNumbers::test_odd`],
+                TestRunProfileKind.Coverage,
+                testRun.object,
+                pythonExecFactory,
+            )
+            .then(() => {
+                assert.ok(collectedOutput, 'expect output to be collected');
+            });
+    });
-    // test('pytest execution adapter large workspace', async () => {
-    //     // result resolver and saved data for assertions
-    //     resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
-    //     let callCount = 0;
-    //     let failureOccurred = false;
-    //     let failureMsg = '';
-    //     resultResolver._resolveExecution = async (payload, _token?) => {
-    //         traceLog(`resolveDiscovery ${payload}`);
-    //         callCount = callCount + 1;
-    //         // the payloads that get to the _resolveExecution are all data and should be successful.
-    //         try {
-    //             assert.strictEqual(
-    //                 payload.status,
-    //                 'success',
-    //                 `Expected status to be 'success', instead status is ${payload.status}`,
-    //             );
-    //             assert.ok(payload.result, 'Expected results to be present');
-    //         } catch (err) {
-    //             failureMsg = err ? 
(err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; + return Promise.resolve(); + }; + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathCoverageWorkspace); + configService.getSettings(workspaceUri).testing.pytestArgs = []; - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // configService.getSettings(workspaceUri).testing.pytestArgs = []; + // run pytest execution + const executionAdapter = new PytestTestExecutionAdapter( + configService, + testOutputChannel.object, + resultResolver, + envVarsService, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests( + workspaceUri, + [`${rootPathCoverageWorkspace}/test_even.py::TestNumbers::test_odd`], + TestRunProfileKind.Coverage, + testRun.object, + pythonExecFactory, + ) + .then(() => { + assert.ok(collectedOutput, 'expect output to be collected'); + }); + }); + test('pytest execution adapter large workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + assert.strictEqual( + payload.status, + 'success', + `Expected status to be 'success', instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : '';
+                failureOccurred = true;
+            }
+            return Promise.resolve();
+        };
+
+        // set workspace to test workspace folder
+        workspaceUri = Uri.parse(rootPathLargeWorkspace);
+        configService.getSettings(workspaceUri).testing.pytestArgs = [];
+
+        // generate list of test_ids
+        const testIds: string[] = [];
+        for (let i = 0; i < 2000; i = i + 1) {
+            const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`;
+            testIds.push(testId);
+        }
+
+        // run pytest execution
+        const executionAdapter = new PytestTestExecutionAdapter(
+            configService,
+            testOutputChannel.object,
+            resultResolver,
+            envVarsService,
+        );
+        const testRun = typeMoq.Mock.ofType<TestRun>();
+        testRun
+            .setup((t) => t.token)
+            .returns(
+                () =>
+                    ({
+                        onCancellationRequested: () => undefined,
+                    } as any),
+            );
+        let collectedOutput = '';
+        testRun
+            .setup((t) => t.appendOutput(typeMoq.It.isAny()))
+            .callback((output: string) => {
+                collectedOutput += output;
+                traceLog('appendOutput was called with:', output);
+            })
+            .returns(() => false);
+        await executionAdapter
+            .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory)
+            .then(() => {
+                // verify that the _resolveExecution was called once per test
+                assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once per test');
+                assert.strictEqual(failureOccurred, false, failureMsg);
+
+                // verify output works for large repo
+                assert.ok(
+                    collectedOutput.includes('test session starts'),
+                    'The test string does not contain the expected stdout output from pytest.',
+                );
+            });
+    });
     test('unittest discovery adapter seg fault error handling', async () => {
         resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
         let callCount = 0;

From cca205cc5d0e73095729f8e786ed8649652a7750 Mon Sep 17 00:00:00 2001
From: eleanorjboyd
Date: Wed, 30 Oct 2024 14:20:29 -0700
Subject: [PATCH 15/18] remove prints

---
 .../pytest/pytestExecutionAdapter.ts          | 21 -------------------
 .../testing/common/testingAdapter.test.ts     |  3 ---
 2 files changed, 
24 deletions(-) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 0e90861f2eab..90eaa890bcf1 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -38,12 +38,10 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { debugLauncher?: ITestDebugLauncher, interpreter?: PythonEnvironment, ): Promise { - console.log('EJFB running tests'); const deferredTillServerClose: Deferred = utils.createTestingDeferred(); // create callback to handle data received on the named pipe const dataReceivedCallback = (data: ExecutionTestPayload) => { - console.log('EJFB data received callback'); if (runInstance && !runInstance.token.isCancellationRequested) { this.resultResolver?.resolveExecution(data, runInstance); @@ -60,8 +58,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { cSource.token, // token to cancel ); runInstance?.token.onCancellationRequested(() => { - console.log('EJFB cancelation token hit!'); - traceInfo(`Test run cancelled, resolving 'TillServerClose' deferred for ${uri.fsPath}.`); const executionPayload: ExecutionTestPayload = { cwd: uri.fsPath, @@ -72,7 +68,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { }); try { - console.log('EJFB run tests new'); await this.runTestsNew( uri, testIds, @@ -85,8 +80,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { interpreter, ); } finally { - console.log('EJFB await finally'); - await deferredTillServerClose.promise; } @@ -111,7 +104,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { debugLauncher?: ITestDebugLauncher, interpreter?: PythonEnvironment, ): Promise { - console.log('EJFB running tests 2222'); const relativePathToPytest = 'python_files'; const fullPluginPath = path.join(EXTENSION_ROOT_DIR, relativePathToPytest); @@ -122,7 +114,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const mutableEnv = { ...(await this.envVarsService?.getEnvironmentVariables(uri)), }; - console.log('EJFB after env vars service'); // get python path from mutable env, it contains process.env as well const pythonPathParts: string[] = mutableEnv.PYTHONPATH?.split(path.delimiter) ?? 
[]; const pythonPathCommand = [fullPluginPath, ...pythonPathParts].join(path.delimiter); @@ -139,10 +130,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { resource: uri, interpreter, }; - console.log('EJFB before createActivatedEnvironment', executionFactory); // need to check what will happen in the exec service is NOT defined and is null const execService = await executionFactory?.createActivatedEnvironment(creationOptions); - console.log('EJFB after exec service'); try { // Remove positional test folders and files, we will add as needed per node let testArgs = removePositionalFoldersAndFiles(pytestArgs); @@ -159,7 +148,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // create a file with the test ids and set the environment variable to the file name const testIdsFileName = await utils.writeTestIdsFile(testIds); - console.log('EJFB after write test ids file'); mutableEnv.RUN_TEST_IDS_PIPE = testIdsFileName; traceInfo(`All environment variables set for pytest execution: ${JSON.stringify(mutableEnv)}`); @@ -186,7 +174,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { serverCancel.cancel(); }); } else { - console.log('EJFB before execObservable'); // deferredTillExecClose is resolved when all stdout and stderr is read const deferredTillExecClose: Deferred = utils.createTestingDeferred(); // combine path to run script with run args @@ -197,7 +184,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { let resultProc: ChildProcess | undefined; runInstance?.token.onCancellationRequested(() => { - console.log('EJFB run instance canceled'); traceInfo(`Test run cancelled, killing pytest subprocess for workspace ${uri.fsPath}`); // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here. 
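Editor's note (not part of the patch): the hunks above strip debug prints from the cancellation path without changing its logic — on cancellation, kill the pytest subprocess if one exists, otherwise resolve the deferred directly so awaiting callers are unblocked. A minimal, self-contained sketch of that pattern follows; `createDeferred` is an assumed stand-in for the extension's internal deferred utility.

import { ChildProcess } from 'child_process';
import { CancellationToken } from 'vscode';

interface Deferred<T> {
    promise: Promise<T>;
    resolve(value: T | PromiseLike<T>): void;
}

// assumed stand-in for the extension's createTestingDeferred helper
function createDeferred<T>(): Deferred<T> {
    let resolve!: (value: T | PromiseLike<T>) => void;
    const promise = new Promise<T>((res) => {
        resolve = res;
    });
    return { promise, resolve };
}

function wireRunCancellation(
    token: CancellationToken,
    getProc: () => ChildProcess | undefined,
    deferredTillExecClose: Deferred<void>,
): void {
    token.onCancellationRequested(() => {
        const proc = getProc();
        if (proc) {
            // killing the subprocess fires its 'close' event, whose handler
            // resolves deferredTillExecClose
            proc.kill();
        } else {
            // no subprocess was spawned yet; resolve directly so the awaiting
            // runTests call is not left hanging
            deferredTillExecClose.resolve();
        }
    });
}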
@@ -209,7 +195,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
                 }
             });

-            console.log('EJFB before execObservable');
             const result = execService?.execObservable(runArgs, spawnOptions);
             resultProc = result?.proc;
@@ -220,16 +205,13 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
                 const out = utils.fixLogLinesNoTrailing(data.toString());
                 runInstance?.appendOutput(out);
                 this.outputChannel?.append(out);
-                console.log('EJFB stdout', out);
             });
             result?.proc?.stderr?.on('data', (data) => {
                 const out = utils.fixLogLinesNoTrailing(data.toString());
                 runInstance?.appendOutput(out);
                 this.outputChannel?.append(out);
-                console.log('EJFB stderr', out);
             });
             result?.proc?.on('exit', (code, signal) => {
-                console.log('EJFB on exit');
                 this.outputChannel?.append(utils.MESSAGE_ON_TESTING_OUTPUT_MOVE);

                 if (code !== 0 && testIds) {
@@ -240,7 +222,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
             });

             result?.proc?.on('close', (code, signal) => {
-                console.log('EJFB on close');
                 traceVerbose('Test run finished, subprocess closed.');
                 // if the child has testIds then this is a run request
@@ -266,8 +247,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
                 deferredTillExecClose.resolve();
                 serverCancel.cancel();
             });
-            console.log('EJFB awaiting deferredTillExecClose');
-
             await deferredTillExecClose.promise;
         }
     } catch (ex) {

diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts
index 6d6d56ce3468..65d65538c7c5 100644
--- a/src/test/testing/common/testingAdapter.test.ts
+++ b/src/test/testing/common/testingAdapter.test.ts
@@ -1169,7 +1169,6 @@ suite('End to End Tests: test adapters', () => {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            console.log('EJFB returning promise.resolve');
             return Promise.resolve();
         };
@@ -1196,11 +1195,9 @@ suite('End to End Tests: test adapters', () => {
                     onCancellationRequested: () => undefined,
                 } as any),
             );
-        console.log('EJFB, right before run tests');
         await executionAdapter
             .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory)
             .finally(() => {
-                console.log('EJFB executing assertions');
                 assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once');
                 assert.strictEqual(failureOccurred, false, failureMsg);
             });

From a23f8fa8e1cf07ee4b0c8cd08eca39b074c35635 Mon Sep 17 00:00:00 2001
From: eleanorjboyd
Date: Wed, 30 Oct 2024 14:29:49 -0700
Subject: [PATCH 16/18] formatted

---
 .../testing/testController/pytest/pytestExecutionAdapter.ts | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts
index 90eaa890bcf1..8847738b65cd 100644
--- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts
+++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts
@@ -42,7 +42,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
         // create callback to handle data received on the named pipe
         const dataReceivedCallback = (data: ExecutionTestPayload) => {
-
             if (runInstance && !runInstance.token.isCancellationRequested) {
                 this.resultResolver?.resolveExecution(data, runInstance);
             } else {
@@ -104,7 +103,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
         debugLauncher?: ITestDebugLauncher,
         interpreter?: PythonEnvironment,
     ): Promise {
-
         const relativePathToPytest = 'python_files';
         const fullPluginPath = path.join(EXTENSION_ROOT_DIR, relativePathToPytest);
         const settings = this.configSettings.getSettings(uri);
@@ -184,7 +182,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
             let resultProc: ChildProcess | undefined;

             runInstance?.token.onCancellationRequested(() => {
-
                 traceInfo(`Test run cancelled, killing pytest subprocess for workspace ${uri.fsPath}`);
                 // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here.
                 if (resultProc) {
@@ -212,7 +209,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
                 this.outputChannel?.append(out);
             });
             result?.proc?.on('exit', (code, signal) => {
-
                 this.outputChannel?.append(utils.MESSAGE_ON_TESTING_OUTPUT_MOVE);
                 if (code !== 0 && testIds) {
                     traceError(
@@ -222,7 +218,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
             });

             result?.proc?.on('close', (code, signal) => {
-
                 traceVerbose('Test run finished, subprocess closed.');
                 // if the child has testIds then this is a run request
                 // if the child process exited with a non-zero exit code, then we need to send the error payload.

From 4d11b2b2b6c7880684a7cb10ea9beb0ccb279a88 Mon Sep 17 00:00:00 2001
From: eleanorjboyd
Date: Fri, 8 Nov 2024 11:11:46 -0800
Subject: [PATCH 17/18] update to pytestprovider

---
 .../testing/common/testingAdapter.test.ts | 81 +------------------
 1 file changed, 2 insertions(+), 79 deletions(-)

diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts
index 65d65538c7c5..24a34f8645ed 100644
--- a/src/test/testing/common/testingAdapter.test.ts
+++ b/src/test/testing/common/testingAdapter.test.ts
@@ -677,7 +677,7 @@ suite('End to End Tests: test adapters', () => {
     });
     test('pytest execution adapter small workspace with correct output', async () => {
         // result resolver and saved data for assertions
-        resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
+        resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
@@ -874,7 +874,7 @@ suite('End to End Tests: test adapters', () => {
     });
     test('pytest execution adapter large workspace', async () => {
         // result resolver and saved data for assertions
-        resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
+        resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
@@ -1061,83 +1061,6 @@ suite('End to End Tests: test adapters', () => {
             assert.strictEqual(failureOccurred, false, failureMsg);
         });
     });
-    // test('unittest execution adapter seg fault error handling', async () => {
-    //     resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
-    //     let callCount = 0;
-    //     let failureOccurred = false;
-    //     let failureMsg = '';
-    //     resultResolver._resolveExecution = async (data, _token?) => {
-    //         // do the following asserts for each time resolveExecution is called, should be called once per test.
-    //         callCount = callCount + 1;
-    //         traceLog(`unittest execution adapter seg fault error handling \n ${JSON.stringify(data)}`);
-    //         try {
-    //             if (data.status === 'error') {
-    //                 if (data.error === undefined) {
-    //                     // Dereference a NULL pointer
-    //                     const indexOfTest = JSON.stringify(data).search('Dereference a NULL pointer');
-    //                     if (indexOfTest === -1) {
-    //                         failureOccurred = true;
-    //                         failureMsg = 'Expected test to have a null pointer';
-    //                     }
-    //                 } else if (data.error.length === 0) {
-    //                     failureOccurred = true;
-    //                     failureMsg = "Expected errors in 'error' field";
-    //                 }
-    //             } else {
-    //                 const indexOfTest = JSON.stringify(data.result).search('error');
-    //                 if (indexOfTest === -1) {
-    //                     failureOccurred = true;
-    //                     failureMsg =
-    //                         'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.';
-    //                 }
-    //             }
-    //             if (data.result === undefined) {
-    //                 failureOccurred = true;
-    //                 failureMsg = 'Expected results to be present';
-    //             }
-    //             // make sure the testID is found in the results
-    //             const indexOfTest = JSON.stringify(data).search('test_seg_fault.TestSegmentationFault.test_segfault');
-    //             if (indexOfTest === -1) {
-    //                 failureOccurred = true;
-    //                 failureMsg = 'Expected testId to be present';
-    //             }
-    //         } catch (err) {
-    //             failureMsg = err ? (err as Error).toString() : '';
-    //             failureOccurred = true;
-    //         }
-    //         return Promise.resolve();
-    //     };
-
-    //     const testId = `test_seg_fault.TestSegmentationFault.test_segfault`;
-    //     const testIds: string[] = [testId];
-
-    //     // set workspace to test workspace folder
-    //     workspaceUri = Uri.parse(rootPathErrorWorkspace);
-    //     configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py'];
-
-    //     // run pytest execution
-    //     const executionAdapter = new UnittestTestExecutionAdapter(
-    //         configService,
-    //         testOutputChannel.object,
-    //         resultResolver,
-    //         envVarsService,
-    //     );
-    //     const testRun = typeMoq.Mock.ofType();
-    //     testRun
-    //         .setup((t) => t.token)
-    //         .returns(
-    //             () =>
-    //                 ({
-    //                     onCancellationRequested: () => undefined,
-    //                 } as any),
-    //         );
-    //     await executionAdapter
-    //         .runTests(workspaceUri, testIds, TestRunProfileKind.Run, testRun.object, pythonExecFactory)
-    //         .finally(() => {
-    //             assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once');
-    //             assert.strictEqual(failureOccurred, false, failureMsg);
-    //         });
-    // });
     test('pytest execution adapter seg fault error handling', async () => {
         resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;

From b039f71866513b8649f032e1673ab4ea0b0b1dc8 Mon Sep 17 00:00:00 2001
From: eleanorjboyd
Date: Fri, 8 Nov 2024 11:31:19 -0800
Subject: [PATCH 18/18] refinement

---
 .../testing/testController/common/utils.ts | 22 -------------------
 .../pytest/pytestDiscoveryAdapter.ts       |  7 ++----
 2 files changed, 2 insertions(+), 27 deletions(-)

diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts
index 8ca5bf469c12..6c1492c2a9b7 100644
--- a/src/client/testing/testController/common/utils.ts
+++ b/src/client/testing/testController/common/utils.ts
@@ -169,28 +169,6 @@ export function pythonTestAdapterRewriteEnabled(serviceContainer: IServiceContainer
     return experiment.inExperimentSync(EnableTestAdapterRewrite.experiment);
 }

-// export async function startTestIdsNamedPipe(testIds: string[]): Promise {
-//     const pipeName: string = generateRandomPipeName('python-test-ids');
-//     // uses callback so the on connect action occurs after the pipe is created
-//     await createNamedPipeServer(pipeName, ([_reader, writer]) => {
-//         traceVerbose('Test Ids named pipe connected');
-//         // const num = await
-//         const msg = {
-//             jsonrpc: '2.0',
-//             params: testIds,
-//         } as Message;
-//         writer
-//             .write(msg)
-//             .then(() => {
-//                 writer.end();
-//             })
-//             .catch((ex) => {
-//                 traceError('Failed to write test ids to named pipe', ex);
-//             });
-//     });
-//     return pipeName;
-// }

 interface ExecutionResultMessage extends Message {
     params: ExecutionTestPayload;
 }

diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
index 5ae463d670c6..837d2bd8f6c0 100644
--- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
+++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
@@ -45,11 +45,8 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
             this.resultResolver?.resolveDiscovery(data);
         });

-        try {
-            await this.runPytestDiscovery(uri, name, executionFactory, interpreter);
-        } finally {
-            traceVerbose('donee');
-        }
+        await this.runPytestDiscovery(uri, name, executionFactory, interpreter);
+
         // this is only a placeholder to handle function overloading until rewrite is finished
         const discoveryPayload: DiscoveredTestPayload = { cwd: uri.fsPath, status: 'success' };
         return discoveryPayload;
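
Note: after this series, test ids reach the pytest subprocess through a plain file whose path is exported in the RUN_TEST_IDS_PIPE environment variable (via utils.writeTestIdsFile in PATCH 15/18), rather than through the commented-out startTestIdsNamedPipe server that PATCH 18/18 deletes. The following is a minimal sketch of what such a file-based writer could look like, assuming a temp-directory location and newline-delimited ids; the function name and layout here are illustrative, not the actual utils.ts implementation:

import * as crypto from 'crypto';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';

// Sketch only: write the test ids to a uniquely named temp file, one id per
// line, and return the path so a caller can hand it to the pytest plugin
// through an environment variable such as RUN_TEST_IDS_PIPE.
async function writeTestIdsFileSketch(testIds: string[]): Promise<string> {
    const fileName = path.join(os.tmpdir(), `test-ids-${crypto.randomBytes(8).toString('hex')}.txt`);
    await fs.promises.writeFile(fileName, testIds.join('\n'));
    return fileName;
}

A file sidesteps the platform-specific connect/accept handshake that a named pipe or Unix domain socket requires, at the cost of leaving small temp files for the OS to clean up.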